source
stringlengths
3
86
python
stringlengths
75
1.04M
test_callbacks.py
"""Tests for Keras callbacks (TerminateOnNaN, CSVLogger, ModelCheckpoint,
EarlyStopping, LearningRateScheduler, ReduceLROnPlateau, TensorBoard,
LambdaCallback)."""
import os
import multiprocessing
import numpy as np
import pytest
from csv import reader
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
from keras import backend as K
from keras.utils import np_utils

# Shared toy-problem dimensions used by every test below.
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20


@keras_test
def test_TerminateOnNaN():
    """Training must stop on the first epoch whose loss is NaN/inf."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    # Huge constant initializer deliberately makes the loss blow up.
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim,
                        activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=cbks, epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2 fit_generator
    def data_generator():
        max_batch_index = len(X_train) // batch_size
        i = 0
        while 1:
            yield (X_train[i * batch_size: (i + 1) * batch_size],
                   y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])


@keras_test
def test_stop_training_csv(tmpdir):
    """CSVLogger must still log the epoch in which TerminateOnNaN fires."""
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        # Yields real data for the first few epochs, then all-NaN batches
        # so that TerminateOnNaN triggers mid-training.
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield np.ones([batch_size, input_dim]) * np.nan, \
                    np.ones([batch_size, num_classes]) * np.nan
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp)


@keras_test
def test_ModelCheckpoint(tmpdir):
    """Exercise ModelCheckpoint across monitor/mode/save_best_only/period."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'checkpoint.h5')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 2
    mode = 'min'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 3
    mode = 'max'
    monitor = 'val_acc'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 4
    save_best_only = True
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 5: checkpoints only every `period` epochs.
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = 'checkpoint.{epoch:02d}.h5'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode,
                                      period=period)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
    assert os.path.isfile(filepath.format(epoch=2))
    assert os.path.isfile(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not tmpdir.listdir()


@keras_test
def test_EarlyStopping():
    """Smoke-test EarlyStopping with explicit and 'auto' modes."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)

    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)


@keras_test
def test_EarlyStopping_reuse():
    """A single EarlyStopping instance must be reusable across fit() calls."""
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience


@keras_test
def test_EarlyStopping_patience():
    """Drive EarlyStopping directly with a fixed loss sequence."""
    class DummyModel(object):
        # Minimal stand-in exposing only the flag EarlyStopping sets.
        def __init__(self):
            self.stop_training = False

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040, 0.1019]

    # Should stop after epoch 3, as the loss has not improved after patience=2 epochs.
    epochs_trained = 0
    early_stop.on_train_begin()

    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})

        if early_stop.model.stop_training:
            break

    assert epochs_trained == 3


@keras_test
def test_LearningRateScheduler():
    """Scheduled LR after 5 epochs should be 1/(1+4) = 0.2."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()


@keras_test
def test_ReduceLROnPlateau():
    """LR should shrink when epsilon forces 'no improvement', and not otherwise."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        # Re-seed so both cases start from identical weights.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()

    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())


@keras_test
def test_CSVLogger(tmpdir):
    """CSVLogger: custom separator, append mode, and object reuse."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'log.tsv')
    sep = '\t'
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    assert os.path.isfile(filepath)
    with open(filepath) as csvfile:
        dialect = Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    # case 3, reuse of CSVLogger object
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    import re
    with open(filepath) as csvfile:
        output = " ".join(csvfile.readlines())
        # The header must appear exactly once despite appending/reuse.
        assert len(re.findall('epoch', output)) == 1

    os.remove(filepath)
    assert not tmpdir.listdir()


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_TensorBoard(tmpdir):
    """TensorBoard logging over fit/fit_generator, with and without val data."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                # simulate multi-input/output models
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    inp = Input((input_dim,))
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    output = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=inp, outputs=output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=1,
                                      embeddings_layer_names=['dense_1'],
                                      batch_size=5)]

    # fit without validation data
    model.fit(X_train, y_train, batch_size=batch_size,
              callbacks=callbacks_factory(histogram_freq=0), epochs=3)

    # fit with validation data and accuracy
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=callbacks_factory(histogram_freq=0), epochs=2)

    # fit generator without validation data
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        callbacks=callbacks_factory(histogram_freq=0))

    # fit generator with validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        validation_data=(X_test, y_test),
                        callbacks=callbacks_factory(histogram_freq=1))

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
    """histogram_freq > 0 without validation data must raise ValueError."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                # simulate multi-input/output models
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    inp = Input((input_dim,))
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    output = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=inp, outputs=output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=1,
                                      embeddings_layer_names=['dense_1'],
                                      batch_size=5)]

    # fit without validation data should raise ValueError if histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit(X_train, y_train, batch_size=batch_size,
                  callbacks=callbacks_factory(histogram_freq=1), epochs=3)
    assert 'validation_data must be provided' in str(raised_exception.value)

    # fit generator without validation data should raise ValueError if
    # histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit_generator(data_generator(True), len(X_train), epochs=2,
                            callbacks=callbacks_factory(histogram_freq=1))
    assert 'validation_data must be provided' in str(raised_exception.value)

    # fit generator with validation data generator should raise ValueError if
    # histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit_generator(data_generator(True), len(X_train), epochs=2,
                            validation_data=data_generator(False),
                            validation_steps=1,
                            callbacks=callbacks_factory(histogram_freq=1))
    assert 'validation_data must be provided' in str(raised_exception.value)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_TensorBoard_multi_input_output(tmpdir):
    """TensorBoard with a two-input / two-output functional model."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(
        num_train=train_samples,
        num_test=test_samples,
        input_shape=(input_dim,),
        classification=True,
        num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                # simulate multi-input/output models
                yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,
                       [y_train[i * batch_size: (i + 1) * batch_size]] * 2)
            else:
                yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,
                       [y_test[i * batch_size: (i + 1) * batch_size]] * 2)
            i += 1
            i = i % max_batch_index

    inp1 = Input((input_dim,))
    inp2 = Input((input_dim,))
    inp = add([inp1, inp2])
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    output1 = Dense(num_classes, activation='softmax')(hidden)
    output2 = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=1,
                                      embeddings_layer_names=['dense_1'],
                                      batch_size=5)]

    # fit without validation data
    model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
              callbacks=callbacks_factory(histogram_freq=0), epochs=3)

    # fit with validation data and accuracy
    model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
              validation_data=([X_test] * 2, [y_test] * 2),
              callbacks=callbacks_factory(histogram_freq=1), epochs=2)

    # fit generator without validation data
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        callbacks=callbacks_factory(histogram_freq=0))

    # fit generator with validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), epochs=2,
                        validation_data=([X_test] * 2, [y_test] * 2),
                        callbacks=callbacks_factory(histogram_freq=1))

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_TensorBoard_convnet(tmpdir):
    """TensorBoard histogram/image logging with a small convnet."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=num_classes)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    model = Sequential([
        Conv2D(filters=8, kernel_size=3,
               activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=2),
        Conv2D(filters=4, kernel_size=(3, 3),
               activation='relu', padding='same'),
        GlobalAveragePooling2D(),
        Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
                                write_images=True, write_grads=True,
                                batch_size=16)
    cbks = [tsb]
    model.summary()
    history = model.fit(x_train, y_train, epochs=2, batch_size=16,
                        validation_data=(x_test, y_test),
                        callbacks=cbks,
                        verbose=0)
    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()


@keras_test
def test_CallbackValData():
    """Callbacks must see identical validation data from fit and fit_generator."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit_generator(data_generator(True), len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape


@keras_test
def test_LambdaCallback():
    """LambdaCallback on_train_end must run (here: kill a helper process)."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and
    # be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive()


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason="Requires TensorFlow backend")
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    """TensorBoard and ReduceLROnPlateau must coexist in one callbacks list."""
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()


if __name__ == '__main__':
    pytest.main([__file__])
sanitylib.py
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Core library for the Zephyr sanitycheck test runner: CMake cache parsing
and test-execution handlers (binary and device)."""
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
import platform

try:
    import serial
except ImportError:
    print("Install pyserial python module with pip to use --device-testing option.")

try:
    from tabulate import tabulate
except ImportError:
    print("Install tabulate python module with pip to use --device-testing option.")

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    sys.exit("$ZEPHYR_BASE environment variable undefined")

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib

# Locks shared by the worker threads.
hw_map_local = threading.Lock()
report_lock = threading.Lock()

# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))

from sanity_chk import scl
from sanity_chk import expr_parser

logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)

# Work queue shared between the producer and the worker threads.
pipeline = queue.LifoQueue()


class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

    Cache Type    Python type
    ----------    -------------------------------------------
    FILEPATH      str
    PATH          str
    STRING        str OR list of str (if ';' is in the value)
    BOOL          bool
    INTERNAL      str OR list of str (if ';' is in the value)
    ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?)                               # name
         :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL)  # type
         =(?P<value>.*)                                 # value
        ''', re.X)

    @classmethod
    def _to_bool(cls, val):
        # Convert a CMake BOOL string into a Python bool.
        #
        #   "True if the constant is 1, ON, YES, TRUE, Y, or a
        #   non-zero number. False if the constant is 0, OFF, NO,
        #   FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
        #   the suffix -NOTFOUND. Named boolean constants are
        #   case-insensitive. If the argument is not one of these
        #   constants, it is treated as a variable."
        #
        # https://cmake.org/cmake/help/v3.0/command/if.html
        val = val.upper()
        if val in ('ON', 'YES', 'TRUE', 'Y'):
            return 1
        elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return 0
        elif val.endswith('-NOTFOUND'):
            return 0
        else:
            try:
                v = int(val)
                return v != 0
            except ValueError as exc:
                raise ValueError('invalid bool {}'.format(val)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        # Parse one CMakeCache.txt line; returns a CMakeCacheEntry or None
        # (comments, blank lines, and unparseable lines are skipped).
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith('//') or line.startswith('#'):
            return None

        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None

        m = cls.CACHE_ENTRY.match(line)
        if not m:
            return None

        name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ['STRING', 'INTERNAL']:
            # If the value is a CMake list (i.e. is a string which
            # contains a ';'), convert to a Python list.
            if ';' in value:
                value = value.split(';')

        return CMakeCacheEntry(name, value)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        fmt = 'CMakeCacheEntry(name={}, value={})'
        return fmt.format(self.name, self.value)


class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        # (Re)read the cache file, replacing any previously loaded entries.
        entries = []
        with open(cache_file, 'r') as cache:
            for line_no, line in enumerate(cache):
                entry = CMakeCacheEntry.from_line(line, line_no)
                if entry:
                    entries.append(entry)
        self._entries = OrderedDict((e.name, e) for e in entries)

    def get(self, name, default=None):
        entry = self._entries.get(name)
        if entry is not None:
            return entry.value
        else:
            return default

    def get_list(self, name, default=None):
        # Like get(), but always returns a list (a scalar str is wrapped).
        if default is None:
            default = []
        entry = self._entries.get(name)
        if entry is not None:
            value = entry.value
            if isinstance(value, list):
                return value
            elif isinstance(value, str):
                return [value] if value else []
            else:
                msg = 'invalid value {} type {}'
                raise RuntimeError(msg.format(value, type(value)))
        else:
            return default

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        return iter(self._entries.values())


class SanityCheckException(Exception):
    # Root of the sanitycheck exception hierarchy.
    pass


class SanityRuntimeError(SanityCheckException):
    pass


class ConfigurationError(SanityCheckException):
    def __init__(self, cfile, message):
        SanityCheckException.__init__(self, cfile + ": " + message)


class BuildError(SanityCheckException):
    pass


class ExecutionError(SanityCheckException):
    pass


class HarnessImporter:
    """Dynamically loads a harness class by name from scripts/sanity_chk/harness.py
    (falls back to the 'Test' harness when no name is given)."""

    def __init__(self, name):
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
        module = __import__("harness")
        if name:
            my_class = getattr(module, name)
        else:
            my_class = getattr(module, "Test")

        self.instance = my_class()


class Handler:
    """Base class for running one test instance and tracking its state."""

    def __init__(self, instance, type_str="build"):
        """Constructor
        """
        self.lock = threading.Lock()

        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None
        self.call_make_run = False

        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)

        self.generator = None
        self.generator_cmd = None

        self.args = []

    def set_state(self, state, duration):
        # Thread-safe update of (state, duration).
        self.lock.acquire()
        self.state = state
        self.duration = duration
        self.lock.release()

    def get_state(self):
        # Thread-safe read of (state, duration).
        self.lock.acquire()
        ret = (self.state, self.duration)
        self.lock.release()
        return ret

    def record(self, harness):
        # Append any data the harness recorded to recording.csv in the
        # build directory (header row is rewritten on each call).
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)


class BinaryHandler(Handler):
    """Runs a natively built test binary (optionally under valgrind) and
    classifies the outcome via the configured harness."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        self.terminated = False

        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.coverage = False

    def try_kill_process_by_pid(self):
        # Kill the process whose pid was written to self.pid_fn, if any.
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                pass

    def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently where ever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        self.try_kill_process_by_pid()
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def _output_reader(self, proc, harness):
        # Reader thread: stream the binary's stdout into handler.log and
        # feed each line to the harness until it reports a final state.
        log_out_fp = open(self.log, "wt")
        for line in iter(proc.stdout.readline, b''):
            logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
            log_out_fp.write(line.decode('utf-8'))
            log_out_fp.flush()
            harness.handle(line.decode('utf-8').rstrip())
            if harness.state:
                try:
                    # POSIX arch based ztests end on their own,
                    # so let's give it up to 100ms to do so
                    proc.wait(0.1)
                except subprocess.TimeoutExpired:
                    self.terminate(proc)
                break

        log_out_fp.close()

    def handle(self):
        # Run the test binary (or "<generator> run"), wait for completion
        # or timeout, then set the final state and record harness output.
        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        else:
            command = [self.binary]

        run_valgrind = False
        if self.valgrind and shutil.which("valgrind"):
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log"
                       ] + command
            run_valgrind = True

        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)

        start_time = time.time()

        env = os.environ.copy()
        if self.asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.lsan:
                env["ASAN_OPTIONS"] += "detect_leaks=0"

        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
            t.start()
            t.join(self.timeout)
            if t.is_alive():
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode

        handler_time = time.time() - start_time

        if self.coverage:
            subprocess.call(["GCOV_PREFIX=" + self.build_dir,
                             "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)

        self.try_kill_process_by_pid()

        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.

        subprocess.call(["stty", "sane"])
        self.instance.results = harness.tests

        if not self.terminated and self.returncode != 0:
            # When a process is killed, the default handler returns 128 + SIGTERM
            # so in that case the return code itself is not meaningful
            self.set_state("failed", handler_time)
            self.instance.reason = "Failed"
        elif run_valgrind and self.returncode == 2:
            self.set_state("failed", handler_time)
            self.instance.reason = "Valgrind error"
        elif harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state("timeout", handler_time)
            self.instance.reason = "Timeout"

        self.record(harness)


class DeviceHandler(Handler):
    # NOTE(review): this class is truncated in the current view —
    # monitor_serial() continues beyond the visible source.

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        # Pump lines from the serial port into handler.log and the harness,
        # until the port closes or halt_fileno becomes readable.
        log_out_fp = open(self.log, "wt")

        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]

        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)

            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                ser.close()
                break

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            # Harness reached a final verdict: stop monitoring.
            if harness.state:
                ser.close()
                break

        log_out_fp.close()

    def device_is_available(self, device):
        # True when the hardware map has a free, connected entry for the
        # given platform name.
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and i['serial']:
                return True

        return False

    def get_available_device(self, device):
        # Claim the first free hardware-map entry for <device>: mark it busy,
        # bump its usage counter and return the entry dict (None if nothing
        # is free).  Callers must release it via make_device_available().
        # NOTE(review): unlike make_device_available(), this does not hold
        # hw_map_local — presumably racy under parallel handlers; verify.
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and i['serial']:
                i['available'] = False
                i['counter'] += 1
                return i

        return None

    def make_device_available(self, serial):
        # Release a previously claimed device, keyed by its serial port path.
        # hw_map_local serializes updates to the shared hardware map.
        with hw_map_local:
            for i in self.suite.connected_hardware:
                if i['serial'] == serial:
                    i['available'] = True

    @staticmethod
    def run_custom_script(script, timeout):
        # Run a user-supplied pre/post flash script, killing it after
        # <timeout> seconds.  Output is logged, never interpreted.
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())

            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        # Flash the image onto real hardware, monitor its serial console and
        # record the harness verdict.  Defaults to "failed" unless the
        # harness or the timeout logic below says otherwise.
        out_state = "failed"

        if self.suite.west_flash:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            if self.suite.west_runner:
                command.append("--runner")
                command.append(self.suite.west_runner)
            # There are three ways this option is used.
# 1) bare: --west-flash # This results in options.west_flash == [] # 2) with a value: --west-flash="--board-id=42" # This results in options.west_flash == "--board-id=42" # 3) Multiple values: --west-flash="--board-id=42,--erase" # This results in options.west_flash == "--board-id=42 --erase" if self.suite.west_flash != []: command.append('--') command.extend(self.suite.west_flash.split(',')) else: command = [self.generator_cmd, "-C", self.build_dir, "flash"] while not self.device_is_available(self.instance.platform.name): logger.debug("Waiting for device {} to become available".format(self.instance.platform.name)) time.sleep(1) hardware = self.get_available_device(self.instance.platform.name) runner = hardware.get('runner', None) if runner: board_id = hardware.get("probe_id", hardware.get("id", None)) product = hardware.get("product", None) command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir] command.append("--runner") command.append(hardware.get('runner', None)) if runner == "pyocd": command.append("--board-id") command.append(board_id) elif runner == "nrfjprog": command.append('--') command.append("--snr") command.append(board_id) elif runner == "openocd" and product == "STM32 STLink": command.append('--') command.append("--cmd-pre-init") command.append("hla_serial %s" % (board_id)) elif runner == "openocd" and product == "EDBG CMSIS-DAP": command.append('--') command.append("--cmd-pre-init") command.append("cmsis_dap_serial %s" % (board_id)) elif runner == "jlink": command.append("--tool-opt=-SelectEmuBySN %s" % (board_id)) serial_device = hardware['serial'] try: ser = serial.Serial( serial_device, baudrate=115200, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=self.timeout ) except serial.SerialException as e: self.set_state("failed", 0) self.instance.reason = "Failed" logger.error("Serial device error: %s" % (str(e))) self.make_device_available(serial_device) return ser.flush() harness_name = 
self.instance.testcase.harness.capitalize() harness_import = HarnessImporter(harness_name) harness = harness_import.instance harness.configure(self.instance) read_pipe, write_pipe = os.pipe() start_time = time.time() pre_script = hardware.get('pre_script') post_flash_script = hardware.get('post_flash_script') post_script = hardware.get('post_script') if pre_script: self.run_custom_script(pre_script, 30) t = threading.Thread(target=self.monitor_serial, daemon=True, args=(ser, read_pipe, harness)) t.start() d_log = "{}/device.log".format(self.instance.build_dir) logger.debug('Flash command: %s', command) try: stdout = stderr = None with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc: try: (stdout, stderr) = proc.communicate(timeout=30) logger.debug(stdout.decode()) if proc.returncode != 0: self.instance.reason = "Device issue (Flash?)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) except subprocess.TimeoutExpired: proc.kill() (stdout, stderr) = proc.communicate() self.instance.reason = "Device issue (Timeout)" with open(d_log, "w") as dlog_fp: dlog_fp.write(stderr.decode()) except subprocess.CalledProcessError: os.write(write_pipe, b'x') # halt the thread if post_flash_script: self.run_custom_script(post_flash_script, 30) t.join(self.timeout) if t.is_alive(): logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name)) out_state = "timeout" if ser.isOpen(): ser.close() os.close(write_pipe) os.close(read_pipe) handler_time = time.time() - start_time if out_state == "timeout": for c in self.instance.testcase.cases: if c not in harness.tests: harness.tests[c] = "BLOCK" self.instance.reason = "Timeout" self.instance.results = harness.tests if harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state(out_state, handler_time) if post_script: self.run_custom_script(post_script, 30) 
self.make_device_available(serial_device) self.record(harness) class QEMUHandler(Handler): """Spawns a thread to monitor QEMU output from pipes We pass QEMU_PIPE to 'make run' and monitor the pipes for output. We need to do this as once qemu starts, it runs forever until killed. Test cases emit special messages to the console as they run, we check for these to collect whether the test passed or failed. """ def __init__(self, instance, type_str): """Constructor @param instance Test instance """ super().__init__(instance, type_str) self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(instance.build_dir, "qemu.pid") @staticmethod def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness): fifo_in = fifo_fn + ".in" fifo_out = fifo_fn + ".out" # These in/out nodes are named from QEMU's perspective, not ours if os.path.exists(fifo_in): os.unlink(fifo_in) os.mkfifo(fifo_in) if os.path.exists(fifo_out): os.unlink(fifo_out) os.mkfifo(fifo_out) # We don't do anything with out_fp but we need to open it for # writing so that QEMU doesn't block, due to the way pipes work out_fp = open(fifo_in, "wb") # Disable internal buffering, we don't # want read() or poll() to ever block if there is data in there in_fp = open(fifo_out, "rb", buffering=0) log_out_fp = open(logfile, "wt") start_time = time.time() timeout_time = start_time + timeout p = select.poll() p.register(in_fp, select.POLLIN) out_state = None line = "" timeout_extended = False while True: this_timeout = int((timeout_time - time.time()) * 1000) if this_timeout < 0 or not p.poll(this_timeout): if not out_state: out_state = "timeout" break try: c = in_fp.read(1).decode("utf-8") except UnicodeDecodeError: # Test is writing something weird, fail out_state = "unexpected byte" break if c == "": # EOF, this shouldn't happen unless QEMU crashes out_state = "unexpected eof" break line = line + c if c != "\n": continue # line contains a full line of data output from QEMU 
log_out_fp.write(line) log_out_fp.flush() line = line.strip() logger.debug("QEMU: %s" % line) harness.handle(line) if harness.state: # if we have registered a fail make sure the state is not # overridden by a false success message coming from the # testsuite if out_state != 'failed': out_state = harness.state # if we get some state, that means test is doing well, we reset # the timeout and wait for 2 more seconds to catch anything # printed late. We wait much longer if code # coverage is enabled since dumping this information can # take some time. if not timeout_extended or harness.capture_coverage: timeout_extended = True if harness.capture_coverage: timeout_time = time.time() + 30 else: timeout_time = time.time() + 2 line = "" handler.record(harness) handler_time = time.time() - start_time logger.debug("QEMU complete (%s) after %f seconds" % (out_state, handler_time)) handler.set_state(out_state, handler_time) if out_state == "timeout": handler.instance.reason = "Timeout" elif out_state == "failed": handler.instance.reason = "Failed" log_out_fp.close() out_fp.close() in_fp.close() if os.path.exists(pid_fn): pid = int(open(pid_fn).read()) os.unlink(pid_fn) try: if pid: os.kill(pid, signal.SIGTERM) except ProcessLookupError: # Oh well, as long as it's dead! User probably sent Ctrl-C pass os.unlink(fifo_in) os.unlink(fifo_out) def handle(self): self.results = {} self.run = True # We pass this to QEMU which looks for fifos with .in and .out # suffixes. 
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid") if os.path.exists(self.pid_fn): os.unlink(self.pid_fn) self.log_fn = self.log harness_import = HarnessImporter(self.instance.testcase.harness.capitalize()) harness = harness_import.instance harness.configure(self.instance) self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread, args=(self, self.timeout, self.build_dir, self.log_fn, self.fifo_fn, self.pid_fn, self.results, harness)) self.instance.results = harness.tests self.thread.daemon = True logger.debug("Spawning QEMUHandler Thread for %s" % self.name) self.thread.start() subprocess.call(["stty", "sane"]) logger.debug("Running %s (%s)" % (self.name, self.type_str)) command = [self.generator_cmd] command += ["-C", self.build_dir, "run"] with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc: logger.debug("Spawning QEMUHandler Thread for %s" % self.name) proc.wait() self.returncode = proc.returncode if self.returncode != 0: self.set_state("failed", 0) self.instance.reason = "Exited with {}".format(self.returncode) def get_fifo(self): return self.fifo_fn class SizeCalculator: alloc_sections = [ "bss", "noinit", "app_bss", "app_noinit", "ccm_bss", "ccm_noinit" ] rw_sections = [ "datas", "initlevel", "exceptions", "initshell", "_static_thread_area", "_k_timer_area", "_k_mem_slab_area", "_k_mem_pool_area", "sw_isr_table", "_k_sem_area", "_k_mutex_area", "app_shmem_regions", "_k_fifo_area", "_k_lifo_area", "_k_stack_area", "_k_msgq_area", "_k_mbox_area", "_k_pipe_area", "net_if", "net_if_dev", "net_l2_data", "_k_queue_area", "_net_buf_pool_area", "app_datas", "kobject_data", "mmu_tables", "app_pad", "priv_stacks", "ccm_data", "usb_descriptor", "usb_data", "usb_bos_desc", "uart_mux", 'log_backends_sections', 'log_dynamic_sections', 'log_const_sections', "app_smem", 'shell_root_cmds_sections', 'log_const_sections', 
"font_entry_sections", "priv_stacks_noinit", "_GCOV_BSS_SECTION_NAME", "gcov", "nocache" ] # These get copied into RAM only on non-XIP ro_sections = [ "rom_start", "text", "ctors", "init_array", "reset", "object_access", "rodata", "devconfig", "net_l2", "vector", "sw_isr_table", "_settings_handlers_area", "_bt_channels_area", "_bt_br_channels_area", "_bt_services_area", "vectors", "net_socket_register", "net_ppp_proto" ] def __init__(self, filename, extra_sections): """Constructor @param filename Path to the output binary The <filename> is parsed by objdump to determine section sizes """ # Make sure this is an ELF binary with open(filename, "rb") as f: magic = f.read(4) try: if magic != b'\x7fELF': raise SanityRuntimeError("%s is not an ELF binary" % filename) except Exception as e: print(str(e)) sys.exit(2) # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK. # GREP can not be used as it returns an error if the symbol is not # found. is_xip_command = "nm " + filename + \ " | awk '/CONFIG_XIP/ { print $3 }'" is_xip_output = subprocess.check_output( is_xip_command, shell=True, stderr=subprocess.STDOUT).decode( "utf-8").strip() try: if is_xip_output.endswith("no symbols"): raise SanityRuntimeError("%s has no symbol information" % filename) except Exception as e: print(str(e)) sys.exit(2) self.is_xip = (len(is_xip_output) != 0) self.filename = filename self.sections = [] self.rom_size = 0 self.ram_size = 0 self.extra_sections = extra_sections self._calculate_sizes() def get_ram_size(self): """Get the amount of RAM the application will use up on the device @return amount of RAM, in bytes """ return self.ram_size def get_rom_size(self): """Get the size of the data that this application uses on device's flash @return amount of ROM, in bytes """ return self.rom_size def unrecognized_sections(self): """Get a list of sections inside the binary that weren't recognized @return list of unrecognized section names """ slist = [] for v in self.sections: if not 
v["recognized"]: slist.append(v["name"]) return slist def _calculate_sizes(self): """ Calculate RAM and ROM usage by section """ objdump_command = "objdump -h " + self.filename objdump_output = subprocess.check_output( objdump_command, shell=True).decode("utf-8").splitlines() for line in objdump_output: words = line.split() if not words: # Skip lines that are too short continue index = words[0] if not index[0].isdigit(): # Skip lines that do not start continue # with a digit name = words[1] # Skip lines with section names if name[0] == '.': # starting with '.' continue # TODO this doesn't actually reflect the size in flash or RAM as # it doesn't include linker-imposed padding between sections. # It is close though. size = int(words[2], 16) if size == 0: continue load_addr = int(words[4], 16) virt_addr = int(words[3], 16) # Add section to memory use totals (for both non-XIP and XIP scenarios) # Unrecognized section names are not included in the calculations. recognized = True if name in SizeCalculator.alloc_sections: self.ram_size += size stype = "alloc" elif name in SizeCalculator.rw_sections: self.ram_size += size self.rom_size += size stype = "rw" elif name in SizeCalculator.ro_sections: self.rom_size += size if not self.is_xip: self.ram_size += size stype = "ro" else: stype = "unknown" if name not in self.extra_sections: recognized = False self.sections.append({"name": name, "load_addr": load_addr, "size": size, "virt_addr": virt_addr, "type": stype, "recognized": recognized}) class SanityConfigParser: """Class to read test case files with semantic checking """ def __init__(self, filename, schema): """Instantiate a new SanityConfigParser object @param filename Source .yaml file to read """ self.data = {} self.schema = schema self.filename = filename self.tests = {} self.common = {} def load(self): self.data = scl.yaml_load_verify(self.filename, self.schema) if 'tests' in self.data: self.tests = self.data['tests'] if 'common' in self.data: self.common = 
self.data['common']

    def _cast_value(self, value, typestr):
        # Convert a raw YAML value to the Python type named by <typestr>.
        # The mini-language: "str", "float", "int", "bool", "map" (pass
        # through), "list"/"set" (whitespace-split a string), and
        # "list:<type>"/"set:<type>" (split, then cast every element).
        if isinstance(value, str):
            v = value.strip()
        if typestr == "str":
            # NOTE(review): if value is not a str, `v` is unbound here and
            # this raises — presumably never hit for "str" keys; verify.
            return v
        elif typestr == "float":
            return float(value)
        elif typestr == "int":
            return int(value)
        elif typestr == "bool":
            # YAML already parses booleans, so the value passes through
            # unconverted (a str stays a str).
            return value
        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            # "list:<type>" => cast each whitespace-separated element.
            if len(typestr) > 4 and typestr[4] == ":":
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs
        elif typestr.startswith("set"):
            vs = v.split()
            # "set:<type>" => cast each element before building the set.
            if len(typestr) > 3 and typestr[3] == ":":
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)
        elif typestr.startswith("map"):
            return value
        else:
            # NOTE(review): the message interpolates the *value*, not the
            # unknown type string — looks like a latent message bug, left
            # as-is since callers may match on it.
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % value)

    def get_test(self, name, valid_keys):
        """Get a dictionary representing the keys/values within a test

        @param name The test in the .yaml file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that could
            be specified, if a key is given in the .yaml file which isn't in
            here, it will generate an error. Each value in this dictionary
            is another dictionary containing metadata:

                "default" - Default value if not given
                "type" - Data type to convert the text value to. Simple types
                    supported are "str", "float", "int", "bool" which will get
                    converted to respective Python data types. "set" and "list"
                    may also be specified which will split the value by
                    whitespace (but keep the elements as strings). finally,
                    "list:<type>" and "set:<type>" may be given which will
                    perform a type conversion after splitting the value up.
                "required" - If true, raise an error if not defined.
If false and "default" isn't specified, a type conversion will be done on an empty string @return A dictionary containing the test key-value pairs with type conversion and default values filled in per valid_keys """ d = {} for k, v in self.common.items(): d[k] = v for k, v in self.tests[name].items(): if k not in valid_keys: raise ConfigurationError( self.filename, "Unknown config key '%s' in definition for '%s'" % (k, name)) if k in d: if isinstance(d[k], str): # By default, we just concatenate string values of keys # which appear both in "common" and per-test sections, # but some keys are handled in adhoc way based on their # semantics. if k == "filter": d[k] = "(%s) and (%s)" % (d[k], v) else: d[k] += " " + v else: d[k] = v for k, kinfo in valid_keys.items(): if k not in d: if "required" in kinfo: required = kinfo["required"] else: required = False if required: raise ConfigurationError( self.filename, "missing required value for '%s' in test '%s'" % (k, name)) else: if "default" in kinfo: default = kinfo["default"] else: default = self._cast_value("", kinfo["type"]) d[k] = default else: try: d[k] = self._cast_value(d[k], kinfo["type"]) except ValueError: raise ConfigurationError( self.filename, "bad %s value '%s' for key '%s' in name '%s'" % (kinfo["type"], d[k], k, name)) return d class Platform: """Class representing metadata for a particular platform Maps directly to BOARD when building""" platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "platform-schema.yaml")) def __init__(self): """Constructor. 
""" self.name = "" self.sanitycheck = True # if no RAM size is specified by the board, take a default of 128K self.ram = 128 self.ignore_tags = [] self.default = False # if no flash size is specified by the board, take a default of 512K self.flash = 512 self.supported = set() self.arch = "" self.type = "na" self.simulation = "na" self.supported_toolchains = [] self.env = [] self.env_satisfied = True self.filter_data = dict() def load(self, platform_file): scp = SanityConfigParser(platform_file, self.platform_schema) scp.load() data = scp.data self.name = data['identifier'] self.sanitycheck = data.get("sanitycheck", True) # if no RAM size is specified by the board, take a default of 128K self.ram = data.get("ram", 128) testing = data.get("testing", {}) self.ignore_tags = testing.get("ignore_tags", []) self.default = testing.get("default", False) # if no flash size is specified by the board, take a default of 512K self.flash = data.get("flash", 512) self.supported = set() for supp_feature in data.get("supported", []): for item in supp_feature.split(":"): self.supported.add(item) self.arch = data['arch'] self.type = data.get('type', "na") self.simulation = data.get('simulation', "na") self.supported_toolchains = data.get("toolchain", []) self.env = data.get("env", []) self.env_satisfied = True for env in self.env: if not os.environ.get(env, None): self.env_satisfied = False def __repr__(self): return "<%s on %s>" % (self.name, self.arch) class DisablePyTestCollectionMixin(object): __test__ = False class TestCase(DisablePyTestCollectionMixin): """Class representing a test application """ def __init__(self, testcase_root, workdir, name): """TestCase constructor. This gets called by TestSuite as it finds and reads test yaml files. Multiple TestCase instances may be generated from a single testcase.yaml, each one corresponds to an entry within that file. We need to have a unique name for every single test case. 
Since a testcase.yaml can define multiple tests, the canonical name for the test case is <workdir>/<name>. @param testcase_root os.path.abspath() of one of the --testcase-root @param workdir Sub-directory of testcase_root where the .yaml test configuration file was found @param name Name of this test case, corresponding to the entry name in the test case configuration file. For many test cases that just define one test, can be anything and is usually "test". This is really only used to distinguish between different cases when the testcase.yaml defines multiple tests """ self.source_dir = "" self.yamlfile = "" self.cases = [] self.name = self.get_unique(testcase_root, workdir, name) self.id = name self.type = None self.tags = set() self.extra_args = None self.extra_configs = None self.arch_whitelist = None self.arch_exclude = None self.skip = False self.platform_exclude = None self.platform_whitelist = None self.toolchain_exclude = None self.toolchain_whitelist = None self.tc_filter = None self.timeout = 60 self.harness = "" self.harness_config = {} self.build_only = True self.build_on_all = False self.slow = False self.min_ram = -1 self.depends_on = None self.min_flash = -1 self.extra_sections = None @staticmethod def get_unique(testcase_root, workdir, name): canonical_testcase_root = os.path.realpath(testcase_root) if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents: # This is in ZEPHYR_BASE, so include path in name for uniqueness # FIXME: We should not depend on path of test for unique names. relative_tc_root = os.path.relpath(canonical_testcase_root, start=canonical_zephyr_base) else: relative_tc_root = "" # workdir can be "." unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name)) check = name.split(".") if len(check) < 2: raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \ Tests should reference the category and subsystem with a dot as a separator. 
""" ) return unique @staticmethod def scan_file(inf_name): suite_regex = re.compile( # do not match until end-of-line, otherwise we won't allow # stc_regex below to catch the ones that are declared in the same # line--as we only search starting the end of this match br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,", re.MULTILINE) stc_regex = re.compile( br"^\s*" # empy space at the beginning is ok # catch the case where it is declared in the same sentence, e.g: # # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME)); br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?" # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME) br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?" # Consume the argument that becomes the extra testcse br"\(\s*" br"(?P<stc_name>[a-zA-Z0-9_]+)" # _setup_teardown() variant has two extra arguments that we ignore br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?" br"\s*\)", # We don't check how it finishes; we don't care re.MULTILINE) suite_run_regex = re.compile( br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)", re.MULTILINE) achtung_regex = re.compile( br"(#ifdef|#endif)", re.MULTILINE) warnings = None with open(inf_name) as inf: if os.name == 'nt': mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ} else: mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ, 'offset': 0} with contextlib.closing(mmap.mmap(**mmap_args)) as main_c: # contextlib makes pylint think main_c isn't subscriptable # pylint: disable=unsubscriptable-object suite_regex_match = suite_regex.search(main_c) if not suite_regex_match: # can't find ztest_test_suite, maybe a client, because # it includes ztest.h return None, None suite_run_match = suite_run_regex.search(main_c) if not suite_run_match: raise ValueError("can't find ztest_run_test_suite") achtung_matches = re.findall( achtung_regex, main_c[suite_regex_match.end():suite_run_match.start()]) if 
achtung_matches: warnings = "found invalid %s in ztest_test_suite()" \ % ", ".join({match.decode() for match in achtung_matches}) _matches = re.findall( stc_regex, main_c[suite_regex_match.end():suite_run_match.start()]) matches = [match.decode().replace("test_", "") for match in _matches] return matches, warnings def scan_path(self, path): subcases = [] for filename in glob.glob(os.path.join(path, "src", "*.c")): try: _subcases, warnings = self.scan_file(filename) if warnings: logger.error("%s: %s" % (filename, warnings)) raise SanityRuntimeError("%s: %s" % (filename, warnings)) if _subcases: subcases += _subcases except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) for filename in glob.glob(os.path.join(path, "*.c")): try: _subcases, warnings = self.scan_file(filename) if warnings: logger.error("%s: %s" % (filename, warnings)) if _subcases: subcases += _subcases except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) return subcases def parse_subcases(self, test_path): results = self.scan_path(test_path) for sub in results: name = "{}.{}".format(self.id, sub) self.cases.append(name) if not results: self.cases.append(self.id) def __str__(self): return self.name class TestInstance(DisablePyTestCollectionMixin): """Class representing the execution of a particular TestCase on a platform @param test The TestCase object we want to build/execute @param platform Platform object that we want to build and run against @param base_outdir Base directory for all test results. 
The actual out directory used is <outdir>/<platform>/<test case name> """ def __init__(self, testcase, platform, outdir): self.testcase = testcase self.platform = platform self.status = None self.reason = "Unknown" self.metrics = dict() self.handler = None self.outdir = outdir self.name = os.path.join(platform.name, testcase.name) self.build_dir = os.path.join(outdir, platform.name, testcase.name) self.build_only = True self.run = False self.results = {} def __lt__(self, other): return self.name < other.name # Global testsuite parameters def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixture=[]): # right now we only support building on windows. running is still work # in progress. if os.name == 'nt': self.build_only = True self.run = False return _build_only = True # we asked for build-only on the command line if build_only or self.testcase.build_only: self.build_only = True self.run = False return # Do not run slow tests: skip_slow = self.testcase.slow and not enable_slow if skip_slow: self.build_only = True self.run = False return runnable = bool(self.testcase.type == "unit" or \ self.platform.type == "native" or \ self.platform.simulation in ["nsim", "renode", "qemu"] or \ device_testing) if self.platform.simulation == "nsim": if not find_executable("nsimdrv"): runnable = False if self.platform.simulation == "renode": if not find_executable("renode"): runnable = False # console harness allows us to run the test and capture data. if self.testcase.harness == 'console': # if we have a fixture that is also being supplied on the # command-line, then we need to run the test, not just build it. 
if "fixture" in self.testcase.harness_config: fixture_cfg = self.testcase.harness_config['fixture'] if fixture_cfg in fixture: _build_only = False else: _build_only = True else: _build_only = False elif self.testcase.harness: _build_only = True else: _build_only = False self.build_only = not (not _build_only and runnable) self.run = not self.build_only return def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=[]): # Create this in a "sanitycheck/" subdirectory otherwise this # will pass this overlay to kconfig.py *twice* and kconfig.cmake # will silently give that second time precedence over any # --extra-args=CONFIG_* subdir = os.path.join(self.build_dir, "sanitycheck") os.makedirs(subdir, exist_ok=True) file = os.path.join(subdir, "testcase_extra.conf") with open(file, "w") as f: content = "" if self.testcase.extra_configs: content = "\n".join(self.testcase.extra_configs) if enable_coverage: if platform.name in coverage_platform: content = content + "\nCONFIG_COVERAGE=y" content = content + "\nCONFIG_COVERAGE_DUMP=y" if enable_asan: if platform.type == "native": content = content + "\nCONFIG_ASAN=y" f.write(content) def calculate_sizes(self): """Get the RAM/ROM sizes of a test case. This can only be run after the instance has been executed by MakeGenerator, otherwise there won't be any binaries to measure. 
@return A SizeCalculator object """ fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf")) fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe"))) fns = [x for x in fns if not x.endswith('_prebuilt.elf')] if len(fns) != 1: raise BuildError("Missing/multiple output ELF binary") return SizeCalculator(fns[0], self.testcase.extra_sections) def __repr__(self): return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name) class CMake(): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') def __init__(self, testcase, platform, source_dir, build_dir): self.cwd = None self.capture_output = True self.defconfig = {} self.cmake_cache = {} self.instance = None self.testcase = testcase self.platform = platform self.source_dir = source_dir self.build_dir = build_dir self.log = "build.log" self.generator = None self.generator_cmd = None def parse_generated(self): self.defconfig = {} return {} def run_build(self, args=[]): logger.debug("Building %s for %s" % (self.source_dir, self.platform.name)) cmake_args = [] cmake_args.extend(args) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() results = {} if p.returncode == 0: msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) self.instance.status = "passed" results = {'msg': msg, "returncode": p.returncode, "instance": self.instance} if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, self.log), "a") as log: log.write(log_msg) else: return None else: # A real error occurred, raise an exception if out: log_msg = out.decode(sys.getdefaultencoding()) with open(os.path.join(self.build_dir, 
self.log), "a") as log: log.write(log_msg) if log_msg: res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg) if res: logger.debug("Test skipped due to {} Overflow".format(res[0])) self.instance.status = "skipped" self.instance.reason = "{} overflow".format(res[0]) else: self.instance.status = "failed" self.instance.reason = "Build failure" results = { "returncode": p.returncode, "instance": self.instance, } return results def run_cmake(self, args=[]): ldflags = "-Wl,--fatal-warnings" logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name)) # fixme: add additional cflags based on options cmake_args = [ '-B{}'.format(self.build_dir), '-S{}'.format(self.source_dir), '-DEXTRA_CFLAGS="-Werror ', '-DEXTRA_AFLAGS=-Wa,--fatal-warnings', '-DEXTRA_LDFLAGS="{}'.format(ldflags), '-G{}'.format(self.generator) ] if self.cmake_only: cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1") args = ["-D{}".format(a.replace('"', '')) for a in args] cmake_args.extend(args) cmake_opts = ['-DBOARD={}'.format(self.platform.name)] cmake_args.extend(cmake_opts) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') cmd = [cmake] + cmake_args kwargs = dict() if self.capture_output: kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT if self.cwd: kwargs['cwd'] = self.cwd p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() if p.returncode == 0: filter_results = self.parse_generated() msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) logger.debug(msg) results = {'msg': msg, 'filter': filter_results} else: self.instance.status = "failed" self.instance.reason = "Cmake build failure" logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name)) results = {"returncode": p.returncode} if out: with open(os.path.join(self.build_dir, self.log), "a") as log: log_msg = 
out.decode(sys.getdefaultencoding()) log.write(log_msg) return results class FilterBuilder(CMake): def __init__(self, testcase, platform, source_dir, build_dir): super().__init__(testcase, platform, source_dir, build_dir) self.log = "config-sanitycheck.log" def parse_generated(self): if self.platform.name == "unit_testing": return {} cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt") defconfig_path = os.path.join(self.build_dir, "zephyr", ".config") with open(defconfig_path, "r") as fp: defconfig = {} for line in fp.readlines(): m = self.config_re.match(line) if not m: if line.strip() and not line.startswith("#"): sys.stderr.write("Unrecognized line %s\n" % line) continue defconfig[m.group(1)] = m.group(2).strip() self.defconfig = defconfig cmake_conf = {} try: cache = CMakeCache.from_file(cmake_cache_path) except FileNotFoundError: cache = {} for k in iter(cache): cmake_conf[k.name] = k.value self.cmake_cache = cmake_conf filter_data = { "ARCH": self.platform.arch, "PLATFORM": self.platform.name } filter_data.update(os.environ) filter_data.update(self.defconfig) filter_data.update(self.cmake_cache) dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp") if self.testcase and self.testcase.tc_filter: try: if os.path.exists(dts_path): edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")], warn_reg_unit_address_mismatch=False) else: edt = None res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt) except (ValueError, SyntaxError) as se: sys.stderr.write( "Failed processing %s\n" % self.testcase.yamlfile) raise se if not res: return {os.path.join(self.platform.name, self.testcase.name): True} else: return {os.path.join(self.platform.name, self.testcase.name): False} else: self.platform.filter_data = filter_data return filter_data class ProjectBuilder(FilterBuilder): def __init__(self, suite, instance, **kwargs): super().__init__(instance.testcase, instance.platform, 
                         instance.testcase.source_dir, instance.build_dir)

        self.log = "build.log"
        self.instance = instance
        self.suite = suite

        # Options forwarded from the TestSuite command line.
        self.lsan = kwargs.get('lsan', False)
        self.asan = kwargs.get('asan', False)
        self.valgrind = kwargs.get('valgrind', False)
        self.extra_args = kwargs.get('extra_args', [])
        self.device_testing = kwargs.get('device_testing', False)
        self.cmake_only = kwargs.get('cmake_only', False)
        self.cleanup = kwargs.get('cleanup', False)
        self.coverage = kwargs.get('coverage', False)
        self.inline_logs = kwargs.get('inline_logs', False)
        self.generator = kwargs.get('generator', None)
        self.generator_cmd = kwargs.get('generator_cmd', None)
        self.verbose = kwargs.get('verbose', None)

    @staticmethod
    def log_info(filename, inline_logs):
        # Either dump the log file contents inline or just print its path.
        filename = os.path.abspath(os.path.realpath(filename))
        if inline_logs:
            logger.info("{:-^100}".format(filename))

            try:
                with open(filename) as fp:
                    data = fp.read()
            except Exception as e:
                data = "Unable to read log data (%s)\n" % (str(e))

            logger.error(data)

            logger.info("{:-^100}".format(filename))
        else:
            logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)

    def log_info_file(self, inline_logs):
        # Pick the most relevant log for the failure: valgrind, handler,
        # device, falling back to the build log.
        build_dir = self.instance.build_dir
        h_log = "{}/handler.log".format(build_dir)
        b_log = "{}/build.log".format(build_dir)
        v_log = "{}/valgrind.log".format(build_dir)
        d_log = "{}/device.log".format(build_dir)

        if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
            self.log_info("{}".format(v_log), inline_logs)
        elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
            self.log_info("{}".format(h_log), inline_logs)
        elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
            self.log_info("{}".format(d_log), inline_logs)
        else:
            self.log_info("{}".format(b_log), inline_logs)

    def setup_handler(self):
        """Attach the appropriate runtime handler (QEMU, unit binary,
        native, nsim, renode or device) to the instance."""
        instance = self.instance
        args = []

        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testcase.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
            if self.coverage:
                args.append("COVERAGE=1")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")

            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.coverage = self.coverage

            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "nsim":
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "renode":
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")

        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator

    def process(self, message):
        """State machine for one instance, driven by 'op' messages on the
        global pipeline queue: cmake -> build -> run -> report -> cleanup."""
        op = message.get('op')

        if not self.instance.handler:
            self.setup_handler()

        # The build process, call cmake and build with configured generator
        if op == "cmake":
            results = self.cmake()
            if self.instance.status == "failed":
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                pipeline.put({"op": "report", "test": self.instance})
            else:
                if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "skipped"
                    self.instance.reason = "filter"
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})

        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            results = self.build()

            if not results:
                self.instance.status = "failed"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                if results.get('returncode', 1) > 0:
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    if self.instance.run:
                        pipeline.put({"op": "run", "test": self.instance})
                    else:
                        pipeline.put({"op": "report", "test": self.instance})
        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            self.instance.status, _ = self.instance.handler.get_state()
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "state": "executed",
                "status": self.instance.status,
                "reason": self.instance.reason}
            )
        # Report results and output progress to screen
        elif op == "report":
            with report_lock:
                self.report_out()

            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })

        elif op == "cleanup":
            self.cleanup_artifacts()

    def cleanup_artifacts(self):
        # Remove everything from the build dir except a small whitelist of
        # logs/configs, then prune empty directories and symlinks.
        logger.debug("Cleaning up {}".format(self.instance.build_dir))
        whitelist = [
            'zephyr/.config',
            'handler.log',
            'build.log',
            'device.log',
            'recording.csv',
        ]
        whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]

        for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if path not in whitelist:
                    os.remove(path)
            # Remove empty directories and symbolic links to directories
            for dir in dirnames:
                path = os.path.join(dirpath, dir)
                if os.path.islink(path):
                    os.remove(path)
                elif not os.listdir(path):
                    os.rmdir(path)

    def report_out(self):
        """Update suite counters and print one progress/result line for
        this instance (verbose: one line per test; else a status bar)."""
        total_tests_width = len(str(self.suite.total_tests))
        self.suite.total_done += 1
        instance = self.instance

        if instance.status in ["failed", "timeout"]:
            self.suite.total_failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testcase.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
            if not self.verbose:
                self.log_info_file(self.inline_logs)
        elif instance.status == "skipped":
            self.suite.total_skipped += 1
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
        else:
            status = Fore.GREEN + "PASSED" + Fore.RESET

        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status == "skipped":
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.handler.duration
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"

            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                self.suite.total_done, total_tests_width, self.suite.total_tests,
                instance.platform.name, instance.testcase.name, status, more_info))

            if instance.status in ["failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            sys.stdout.write("\rINFO    - Total complete: %s%4d/%4d%s  %2d%%  skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                self.suite.total_done,
                self.suite.total_tests,
                Fore.RESET,
                int((float(self.suite.total_done) / self.suite.total_tests) * 100),
                Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
                self.suite.total_skipped,
                Fore.RESET,
                Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
                self.suite.total_failed,
                Fore.RESET
            )
            )
        sys.stdout.flush()

    def cmake(self):
        """Assemble the -D argument list (handler args, extra args, merged
        OVERLAY_CONFIG) and run the cmake configure step."""
        instance = self.instance
        args = self.testcase.extra_args[:]
        args += self.extra_args

        if instance.handler:
            args += instance.handler.args

        # merge overlay files into one variable
        def extract_overlays(args):
            re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
            other_args = []
            overlays = []
            for arg in args:
                match = re_overlay.search(arg)
                if match:
                    overlays.append(match.group(1).strip('\'"'))
                else:
                    other_args.append(arg)

            args[:] = other_args
            return overlays

        overlays = extract_overlays(args)

        if (self.testcase.extra_configs or self.coverage or
                self.asan):
            overlays.append(os.path.join(instance.build_dir,
                                         "sanitycheck", "testcase_extra.conf"))

        if overlays:
            args.append("OVERLAY_CONFIG=\"%s\"" % ("
 ".join(overlays)))

        results = self.run_cmake(args)
        return results

    def build(self):
        # Invoke the generator ("cmake --build") for this build dir.
        results = self.run_build(['--build', self.build_dir])
        return results

    def run(self):
        # Hand execution to the instance's handler (device handlers also
        # need access to the suite for hardware bookkeeping).
        instance = self.instance

        if instance.handler.type_str == "device":
            instance.handler.suite = self.suite

        instance.handler.handle()

        sys.stdout.flush()


class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """

    def __init__(self, bound, max_workers, **kwargs):
        super().__init__(max_workers)
        # self.executor = ThreadPoolExecutor(max_workers=max_workers)
        # Semaphore counts free slots: queue capacity plus in-flight workers.
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def submit(self, fn, *args, **kwargs):
        # Block until a slot is free; release the slot when the future is
        # done (or when submission itself fails).
        self.semaphore.acquire()
        try:
            future = super().submit(fn, *args, **kwargs)
        except Exception:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
        return future


class TestSuite(DisablePyTestCollectionMixin):
    """Top-level driver: discovers platforms and testcases, applies the
    command-line filters, schedules builds/runs and writes reports."""

    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    tc_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "sanity_chk", "testcase-schema.yaml"))

    # Schema (type + default) for keys accepted in testcase.yaml/sample.yaml.
    testcase_valid_keys = {"tags": {"type": "set", "required": False},
                           "type": {"type": "str", "default": "integration"},
                           "extra_args": {"type": "list"},
                           "extra_configs": {"type": "list"},
                           "build_only": {"type": "bool", "default": False},
                           "build_on_all": {"type": "bool", "default": False},
                           "skip": {"type": "bool", "default": False},
                           "slow": {"type": "bool", "default": False},
                           "timeout": {"type": "int", "default": 60},
                           "min_ram": {"type": "int", "default": 8},
                           "depends_on": {"type": "set"},
                           "min_flash": {"type": "int", "default": 32},
                           "arch_whitelist": {"type": "set"},
                           "arch_exclude": {"type": "set"},
                           "extra_sections": {"type": "list", "default": []},
                           "platform_exclude": {"type": "set"},
                           "platform_whitelist": {"type": "set"},
                           "toolchain_exclude": {"type": "set"},
                           "toolchain_whitelist": {"type": "set"},
                           "filter": {"type": "str"},
                           "harness": {"type": "str"},
                           "harness_config": {"type": "map", "default": {}}
                           }

    RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
                                "sanity_last_release.csv")

    def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):

        self.roots = testcase_roots
        if not isinstance(board_root_list, list):
            self.board_roots = [board_root_list]
        else:
            self.board_roots = board_root_list

        # Testsuite Options
        self.coverage_platform = []
        self.build_only = False
        self.cmake_only = False
        self.cleanup = False
        self.enable_slow = False
        self.device_testing = False
        self.fixture = []
        self.enable_coverage = False
        self.enable_lsan = False
        self.enable_asan = False
        self.enable_valgrind = False
        self.extra_args = []
        self.inline_logs = False
        self.enable_sizes_report = False
        self.west_flash = None
        self.west_runner = None
        self.generator = None
        self.generator_cmd = None

        # Keep track of which test cases we've filtered out and why
        self.testcases = {}
        self.platforms = []
        self.selected_platforms = []
        self.default_platforms = []
        self.outdir = os.path.abspath(outdir)
        self.discards = {}
        self.load_errors = 0
        self.instances = dict()

        self.total_tests = 0  # number of test instances
        self.total_cases = 0  # number of test cases
        self.total_done = 0  # tests completed
        self.total_failed = 0
        self.total_skipped = 0

        self.total_platforms = 0
        self.start_time = 0
        self.duration = 0
        self.warnings = 0
        self.cv = threading.Condition()

        # hardcoded for now
        self.connected_hardware = []

    def config(self):
        logger.info("coverage platform: {}".format(self.coverage_platform))

    # Debug Functions
    @staticmethod
    def info(what):
        sys.stdout.write(what + "\n")
        sys.stdout.flush()

    def update(self):
        # Refresh derived totals from the current instance/testcase maps.
        self.total_tests = len(self.instances)
        self.total_cases = len(self.testcases)

    def compare_metrics(self,
                        filename):
        # name, datatype, lower results better
        interesting_metrics = [("ram_size", int, True),
                               ("rom_size", int, True)]

        if not os.path.exists(filename):
            logger.info("Cannot compare metrics, %s not found" % filename)
            return []

        results = []
        saved_metrics = {}
        with open(filename) as fp:
            cr = csv.DictReader(fp)
            for row in cr:
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = row[m]
                saved_metrics[(row["test"], row["platform"])] = d

        for instance in self.instances.values():
            mkey = (instance.testcase.name, instance.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            # Report only metrics that exist on both sides and changed.
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in instance.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((instance, metric,
                                instance.metrics.get(metric, 0), delta,
                                lower_better))
        return results

    def misc_reports(self, report, show_footprint, all_deltas,
                     footprint_threshold, last_metrics):
        """Print footprint deltas against a previous metrics CSV, honoring
        the threshold unless all_deltas is set."""
        if not report:
            return

        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas and show_footprint:
            for i, metric, value, delta, lower_better in deltas:
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = (float(delta) / float(value - delta))
                if not all_deltas and (percentage <
                                       (footprint_threshold / 100.0)):
                    continue

                logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
                    i.platform.name, i.testcase.name, Fore.YELLOW,
                    "INFO" if all_deltas else "WARNING", Fore.RESET,
                    metric, delta, value, percentage))
                warnings += 1

        if warnings:
            logger.warning("Deltas based on metrics from last %s" %
                           ("release" if not last_metrics else "run"))

    def summary(self, unrecognized_sections):
        """Log the end-of-run summary: pass rate, failures, skips and
        platform coverage."""
        failed = 0
        for instance in self.instances.values():
            if instance.status == "failed":
                failed += 1
            elif instance.metrics.get("unrecognized") and not unrecognized_sections:
                logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                             (Fore.RED, Fore.RESET, instance.name,
                              str(instance.metrics.get("unrecognized", []))))
                failed += 1

        # Skipped tests are excluded from the pass-rate denominator.
        if self.total_tests and self.total_tests != self.total_skipped:
            pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
                self.total_tests - self.total_skipped))
        else:
            pass_rate = 0

        logger.info(
            "{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                Fore.RED if failed else Fore.GREEN,
                self.total_tests - self.total_failed - self.total_skipped,
                self.total_tests - self.total_skipped,
                Fore.RESET,
                pass_rate,
                Fore.RED if self.total_failed else Fore.RESET,
                self.total_failed,
                Fore.RESET,
                self.total_skipped,
                Fore.YELLOW if self.warnings else Fore.RESET,
                self.warnings,
                Fore.RESET,
                self.duration))

        self.total_platforms = len(self.platforms)
        if self.platforms:
            logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
                self.total_cases,
                len(self.selected_platforms),
                self.total_platforms,
                (100 * len(self.selected_platforms) / len(self.platforms))
            ))

    def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
        """Write xunit/CSV/discard reports for this run; with `release`,
        also refresh the stored release baseline CSV."""
        if not self.instances:
            return

        if name:
            report_name = name
        else:
            report_name = "sanitycheck"

        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
            filename = os.path.join(report_dir, report_name)
            outdir = report_dir
        else:
            filename = os.path.join(self.outdir, report_name)
            outdir = self.outdir

        if suffix:
            filename = "{}_{}".format(filename, suffix)

        if not no_update:
            self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
            self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
            self.csv_report(filename + ".csv")
            self.target_report(outdir, suffix, append=only_failed)
            if self.discards:
                self.discard_report(filename + "_discard.csv")

        if release:
            self.csv_report(self.RELEASE_DATA)

    def add_configurations(self):
        # Discover boards: every <board_root>/*/*/*.yaml is a candidate
        # platform definition.
        for board_root in self.board_roots:
            board_root = os.path.abspath(board_root)

            logger.debug("Reading platform configuration files under %s..." %
                         board_root)

            for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
                logger.debug("Found platform configuration " + file)
                try:
                    platform = Platform()
                    platform.load(file)
                    if platform.sanitycheck:
                        self.platforms.append(platform)
                        if platform.default:
                            self.default_platforms.append(platform.name)

                except RuntimeError as e:
                    logger.error("E: %s: can't load: %s" % (file, e))
                    self.load_errors += 1

    def get_all_tests(self):
        # Flatten every testcase's sub-cases into one list.
        tests = []
        for _, tc in self.testcases.items():
            for case in tc.cases:
                tests.append(case)

        return tests

    @staticmethod
    def get_toolchain():
        toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
                    os.environ.get("ZEPHYR_GCC_VARIANT", None)

        if toolchain == "gccarmemb":
            # Remove this translation when gccarmemb is no longer supported.
            toolchain = "gnuarmemb"

        try:
            if not toolchain:
                raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
        except Exception as e:
            print(str(e))
            sys.exit(2)

        return toolchain

    def add_testcases(self, testcase_filter=[]):
        """Walk the testcase roots, parse every sample.yaml/testcase.yaml
        and populate self.testcases (optionally limited to
        testcase_filter)."""
        for root in self.roots:
            root = os.path.abspath(root)

            logger.debug("Reading test case configuration files under %s..." % root)

            for dirpath, dirnames, filenames in os.walk(root, topdown=True):
                logger.debug("scanning %s" % dirpath)
                if 'sample.yaml' in filenames:
                    filename = 'sample.yaml'
                elif 'testcase.yaml' in filenames:
                    filename = 'testcase.yaml'
                else:
                    continue

                logger.debug("Found possible test case in " + dirpath)

                # Don't descend into a directory that holds a testcase.
                dirnames[:] = []

                tc_path = os.path.join(dirpath, filename)

                try:
                    parsed_data = SanityConfigParser(tc_path, self.tc_schema)
                    parsed_data.load()

                    tc_path = os.path.dirname(tc_path)
                    workdir = os.path.relpath(tc_path, root)

                    for name in parsed_data.tests.keys():
                        tc = TestCase(root, workdir, name)

                        tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)

                        tc.source_dir = tc_path
                        tc.yamlfile = tc_path

                        tc.type = tc_dict["type"]
                        tc.tags = tc_dict["tags"]
                        tc.extra_args = tc_dict["extra_args"]
                        tc.extra_configs = tc_dict["extra_configs"]
                        tc.arch_whitelist = tc_dict["arch_whitelist"]
                        tc.arch_exclude = tc_dict["arch_exclude"]
                        tc.skip = tc_dict["skip"]
                        tc.platform_exclude = tc_dict["platform_exclude"]
                        tc.platform_whitelist = tc_dict["platform_whitelist"]
                        tc.toolchain_exclude = tc_dict["toolchain_exclude"]
                        tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
                        tc.tc_filter = tc_dict["filter"]
                        tc.timeout = tc_dict["timeout"]
                        tc.harness = tc_dict["harness"]
                        tc.harness_config = tc_dict["harness_config"]
                        if tc.harness == 'console' and not tc.harness_config:
                            raise Exception('Harness config error: console harness defined without a configuration.')
                        tc.build_only = tc_dict["build_only"]
                        tc.build_on_all = tc_dict["build_on_all"]
                        tc.slow = tc_dict["slow"]
                        tc.min_ram = tc_dict["min_ram"]
                        tc.depends_on = tc_dict["depends_on"]
                        tc.min_flash = tc_dict["min_flash"]
                        tc.extra_sections = tc_dict["extra_sections"]

                        tc.parse_subcases(tc_path)

                        if testcase_filter:
                            if tc.name and tc.name in testcase_filter:
                                self.testcases[tc.name] = tc
                        else:
                            self.testcases[tc.name] = tc

                except Exception as e:
                    logger.error("%s: can't load (skipping): %s" % (tc_path, e))
                    self.load_errors += 1

    def get_platform(self, name):
        # Linear lookup of a Platform object by its name (None if unknown).
        selected_platform = None
        for platform in self.platforms:
            if platform.name == name:
                selected_platform = platform
                break
        return selected_platform

    def load_from_file(self, file, filter_status=[]):
        """Rebuild the instance list from a previously written CSV report,
        skipping rows whose status is in filter_status."""
        try:
            with open(file, "r") as fp:
                cr = csv.DictReader(fp)
                instance_list = []
                for row in cr:
                    if row["status"] in filter_status:
                        continue
                    test = row["test"]

                    platform = self.get_platform(row["platform"])
                    instance = TestInstance(self.testcases[test], platform, self.outdir)
                    instance.check_build_or_run(
                        self.build_only,
                        self.enable_slow,
                        self.device_testing,
                        self.fixture
                    )
                    instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
                    instance_list.append(instance)
                self.add_instances(instance_list)

        except KeyError as e:
            logger.error("Key error while parsing tests file.({})".format(str(e)))
            sys.exit(2)

        except FileNotFoundError as e:
            logger.error("Couldn't find input file with list of tests. ({})".format(e))
            sys.exit(2)

    def apply_filters(self, **kwargs):
        """Build the instance matrix (testcase x platform), discarding the
        combinations excluded by command-line and testcase filters.

        @return dict {TestInstance: reason string} of discarded instances.
        """
        toolchain = self.get_toolchain()

        discards = {}
        platform_filter = kwargs.get('platform')
        exclude_platform = kwargs.get('exclude_platform', [])
        testcase_filter = kwargs.get('run_individual_tests', [])
        arch_filter = kwargs.get('arch')
        tag_filter = kwargs.get('tag')
        exclude_tag = kwargs.get('exclude_tag')
        all_filter = kwargs.get('all')
        device_testing_filter = kwargs.get('device_testing')
        force_toolchain = kwargs.get('force_toolchain')

        logger.debug("platform filter: " + str(platform_filter))
        logger.debug("    arch_filter: " + str(arch_filter))
        logger.debug("     tag_filter: " + str(tag_filter))
        logger.debug("    exclude_tag: " + str(exclude_tag))

        default_platforms = False

        if platform_filter:
            platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
        else:
            platforms = self.platforms

        if all_filter:
            logger.info("Selecting all possible platforms per test case")
            # When --all used, any --platform arguments ignored
            platform_filter = []
        elif not platform_filter:
            logger.info("Selecting default platforms per test case")
            default_platforms = True

        logger.info("Building initial testcase list...")

        for tc_name, tc in self.testcases.items():
            # list of instances per testcase, aka configurations.
            instance_list = []
            for plat in platforms:
                instance = TestInstance(tc, plat, self.outdir)
                instance.check_build_or_run(
                    self.build_only,
                    self.enable_slow,
                    self.device_testing,
                    self.fixture
                )

                # Each check below either discards the combination with a
                # reason, or falls through to the next check.
                if plat.name in exclude_platform:
                    discards[instance] = "Platform is excluded on command line."
                    continue

                if (plat.arch == "unit") != (tc.type == "unit"):
                    # Discard silently
                    continue

                if device_testing_filter and instance.build_only:
                    discards[instance] = "Not runnable on device"
                    continue

                if tc.skip:
                    discards[instance] = "Skip filter"
                    continue

                if tc.build_on_all and not platform_filter:
                    platform_filter = []

                if tag_filter and not tc.tags.intersection(tag_filter):
                    discards[instance] = "Command line testcase tag filter"
                    continue

                if exclude_tag and tc.tags.intersection(exclude_tag):
                    discards[instance] = "Command line testcase exclude filter"
                    continue

                if testcase_filter and tc_name not in testcase_filter:
                    discards[instance] = "Testcase name filter"
                    continue

                if arch_filter and plat.arch not in arch_filter:
                    discards[instance] = "Command line testcase arch filter"
                    continue

                if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
                    discards[instance] = "Not in test case arch whitelist"
                    continue

                if tc.arch_exclude and plat.arch in tc.arch_exclude:
                    discards[instance] = "In test case arch exclude"
                    continue

                if tc.platform_exclude and plat.name in tc.platform_exclude:
                    discards[instance] = "In test case platform exclude"
                    continue

                if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                    discards[instance] = "In test case toolchain exclude"
                    continue

                if platform_filter and plat.name not in platform_filter:
                    discards[instance] = "Command line platform filter"
                    continue

                if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
                    discards[instance] = "Not in testcase platform whitelist"
                    continue

                if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
                    discards[instance] = "Not in testcase toolchain whitelist"
                    continue

                if not plat.env_satisfied:
                    discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
                    continue

                if not force_toolchain \
                        and toolchain and (toolchain not in plat.supported_toolchains) \
                        and tc.type != 'unit':
                    discards[instance] = "Not supported by the toolchain"
                    continue

                if plat.ram < tc.min_ram:
                    discards[instance] = "Not enough RAM"
                    continue

                if tc.depends_on:
                    dep_intersection = tc.depends_on.intersection(set(plat.supported))
                    if dep_intersection != set(tc.depends_on):
                        discards[instance] = "No hardware support"
                        continue

                if plat.flash < tc.min_flash:
                    discards[instance] = "Not enough FLASH"
                    continue

                if set(plat.ignore_tags) & tc.tags:
                    discards[instance] = "Excluded tags per platform"
                    continue

                # if nothing stopped us until now, it means this configuration
                # needs to be added.
                instance_list.append(instance)

            # no configurations, so jump to next testcase
            if not instance_list:
                continue

            # if sanitycheck was launched with no platform options at all, we
            # take all default platforms
            if default_platforms and not tc.build_on_all:
                if tc.platform_whitelist:
                    a = set(self.default_platforms)
                    b = set(tc.platform_whitelist)
                    c = a.intersection(b)
                    if c:
                        aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                        self.add_instances(aa)
                    else:
                        self.add_instances(instance_list[:1])
                else:
                    instances = list(filter(lambda tc: tc.platform.default, instance_list))
                    self.add_instances(instances)

                for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
                    discards[instance] = "Not a default test platform"

            else:
                self.add_instances(instance_list)

        for _, case in self.instances.items():
            case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)

        self.discards = discards
        self.selected_platforms = set(p.platform.name for p in self.instances.values())

        return discards

    def
    def add_instances(self, instance_list):
        # Register each selected test instance, keyed by its unique name.
        for instance in instance_list:
            self.instances[instance.name] = instance

    def add_tasks_to_queue(self, test_only=False):
        """Feed every registered instance into the global work pipeline.

        In test-only mode, only instances marked runnable are queued (as
        already-"built" run operations); otherwise every instance that has
        not already passed/skipped is reset and queued for a cmake build.
        """
        for instance in self.instances.values():
            if test_only:
                if instance.run:
                    pipeline.put({"op": "run", "test": instance, "status": "built"})
            else:
                if instance.status not in ['passed', 'skipped']:
                    instance.status = None
                    pipeline.put({"op": "cmake", "test": instance})

        return "DONE FEEDING"

    def execute(self):
        """Run the build/test pipeline for all registered instances.

        One feeder future pushes work into the pipeline queue; worker
        futures (one ProjectBuilder per message) are spawned as messages
        arrive, bounded by self.jobs. Afterwards, ELF size metrics are
        collected (in parallel) unless disabled.
        """

        def calc_one_elf_size(instance):
            # Compute RAM/ROM footprint for one built instance; native
            # builds have no meaningful ELF sections so they are zeroed.
            if instance.status not in ["failed", "skipped"]:
                if instance.platform.type != "native":
                    size_calc = instance.calculate_sizes()
                    instance.metrics["ram_size"] = size_calc.get_ram_size()
                    instance.metrics["rom_size"] = size_calc.get_rom_size()
                    instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
                else:
                    instance.metrics["ram_size"] = 0
                    instance.metrics["rom_size"] = 0
                    instance.metrics["unrecognized"] = []
                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0

        logger.info("Adding tasks to the queue...")

        # We can use a with statement to ensure threads are cleaned up promptly
        with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:

            # start a future for a thread which sends work in through the queue
            future_to_test = {
                executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}

            while future_to_test:
                # check for status of the futures which are currently working
                done, pending = concurrent.futures.wait(
                    future_to_test, timeout=1,
                    return_when=concurrent.futures.FIRST_COMPLETED)

                # if there is incoming work, start a new future
                while not pipeline.empty():
                    # fetch a message from the queue
                    message = pipeline.get()
                    test = message['test']
                    pb = ProjectBuilder(self,
                                        test,
                                        lsan=self.enable_lsan,
                                        asan=self.enable_asan,
                                        coverage=self.enable_coverage,
                                        extra_args=self.extra_args,
                                        device_testing=self.device_testing,
                                        cmake_only=self.cmake_only,
                                        cleanup=self.cleanup,
                                        valgrind=self.enable_valgrind,
                                        inline_logs=self.inline_logs,
                                        generator=self.generator,
                                        generator_cmd=self.generator_cmd,
                                        verbose=self.verbose
                                        )
                    future_to_test[executor.submit(pb.process, message)] = test.name

                # process any completed futures
                for future in done:
                    test = future_to_test[future]
                    try:
                        data = future.result()
                    except Exception as exc:
                        # A worker crash is fatal for the whole run.
                        logger.error('%r generated an exception: %s' % (test, exc))
                        sys.exit('%r generated an exception: %s' % (test, exc))
                    else:
                        if data:
                            logger.debug(data)

                    # remove the now completed future
                    del future_to_test[future]

                for future in pending:
                    test = future_to_test[future]
                    try:
                        future.result(timeout=180)
                    except concurrent.futures.TimeoutError:
                        logger.warning("{} stuck?".format(test))

        if self.enable_size_report and not self.cmake_only:
            # Parallelize size calculation
            executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
            futures = [executor.submit(calc_one_elf_size, instance)
                       for instance in self.instances.values()]
            concurrent.futures.wait(futures)
        else:
            for instance in self.instances.values():
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
                instance.metrics["unrecognized"] = []

    def discard_report(self, filename):
        """Write a CSV listing every discarded test instance and why.

        Exits with status 2 when apply_filters() has not populated the
        discards map yet.
        """
        try:
            if self.discards is None:
                raise SanityRuntimeError("apply_filters() hasn't been run!")
        except Exception as e:
            logger.error(str(e))
            sys.exit(2)

        with open(filename, "wt") as csvfile:
            fieldnames = ["test", "arch", "platform", "reason"]
            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for instance, reason in sorted(self.discards.items()):
                rowdict = {"test": instance.testcase.name,
                           "arch": instance.platform.arch,
                           "platform": instance.platform.name,
                           "reason": reason}
                cw.writerow(rowdict)

    def target_report(self, outdir, suffix, append=False):
        # Emit one JUnit-style XML report per platform that has instances.
        platforms = {inst.platform.name for _, inst in self.instances.items()}
        for platform in platforms:
            if suffix:
                filename = os.path.join(outdir, "{}_{}.xml".format(platform, suffix))
            else:
                filename = \
                    os.path.join(outdir, "{}.xml".format(platform))
            self.xunit_report(filename, platform, full_report=True, append=append)

    @staticmethod
    def process_log(log_file):
        """Return the log file's contents with non-printable characters
        stripped, or "" when the file does not exist."""
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    def xunit_report(self, filename, platform=None, full_report=False, append=False):
        """Write a JUnit-style XML report for the (optionally filtered)
        instances.

        full_report=True counts/emits each individual testcase result;
        otherwise one <testcase> per instance. With append=True an existing
        report is updated in place with the re-run results.
        """
        # Tallies for the <testsuite> summary attributes.
        fails = 0
        passes = 0
        errors = 0
        skips = 0
        duration = 0

        for _, instance in self.instances.items():
            if platform and instance.platform.name != platform:
                continue
            handler_time = instance.metrics.get('handler_time', 0)
            duration += handler_time

            if full_report:
                # Count each individual testcase result.
                for k in instance.results.keys():
                    if instance.results[k] == 'PASS':
                        passes += 1
                    elif instance.results[k] == 'BLOCK':
                        errors += 1
                    elif instance.results[k] == 'SKIP':
                        skips += 1
                    else:
                        fails += 1
            else:
                # Count whole instances; build errors and handler crashes
                # are reported as errors rather than failures.
                if instance.status in ["failed", "timeout"]:
                    if instance.reason in ['build_error', 'handler_crash']:
                        errors += 1
                    else:
                        fails += 1
                elif instance.status == 'skipped':
                    skips += 1
                else:
                    passes += 1

        run = "Sanitycheck"
        eleTestsuite = None

        # When we re-run the tests, we re-use the results and update only with
        # the newly run tests.
        if os.path.exists(filename) and append:
            tree = ET.parse(filename)
            eleTestsuites = tree.getroot()
            eleTestsuite = tree.findall('testsuite')[0]
            eleTestsuite.attrib['failures'] = "%d" % fails
            eleTestsuite.attrib['errors'] = "%d" % errors
            eleTestsuite.attrib['skip'] = "%d" % skips
        else:
            eleTestsuites = ET.Element('testsuites')
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=run, time="%f" % duration,
                                         tests="%d" % (errors + passes + fails + skips),
                                         failures="%d" % fails,
                                         errors="%d" % (errors),
                                         skip="%s" % (skips))

        for _, instance in self.instances.items():
            if platform and instance.platform.name != platform:
                continue

            if full_report:
                tname = os.path.basename(instance.testcase.name)
            else:
                tname = instance.testcase.name
            # remove testcases that are being re-run from existing reports
            if append:
                for tc in eleTestsuite.findall('testcase'):
                    if tc.get('classname') == "%s:%s" % (instance.platform.name, tname):
                        eleTestsuite.remove(tc)

            handler_time = instance.metrics.get('handler_time', 0)

            if full_report:
                for k in instance.results.keys():
                    eleTestcase = ET.SubElement(
                        eleTestsuite, 'testcase',
                        classname="%s:%s" % (instance.platform.name, tname),
                        name="%s" % (k),
                        time="%f" % handler_time)
                    if instance.results[k] in ['FAIL', 'BLOCK']:
                        if instance.results[k] == 'FAIL':
                            el = ET.SubElement(
                                eleTestcase,
                                'failure',
                                type="failure",
                                message="failed")
                        else:
                            el = ET.SubElement(
                                eleTestcase,
                                'error',
                                type="failure",
                                message="failed")
                        # Attach the sanitized handler log to the element.
                        p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                        log_file = os.path.join(p, "handler.log")
                        el.text = self.process_log(log_file)
                    elif instance.results[k] == 'SKIP':
                        el = ET.SubElement(
                            eleTestcase,
                            'skipped',
                            type="skipped",
                            message="Skipped")
            else:
                eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                                            classname="%s:%s" % (instance.platform.name, instance.testcase.name),
                                            name="%s" % (instance.testcase.name),
                                            time="%f" % handler_time)
                if instance.status in ["failed", "timeout"]:
                    failure = ET.SubElement(
                        eleTestcase,
                        'failure',
type="failure", message=instance.reason) p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name)) bl = os.path.join(p, "build.log") hl = os.path.join(p, "handler.log") log_file = bl if instance.reason != 'Build error': if os.path.exists(hl): log_file = hl else: log_file = bl failure.text = self.process_log(log_file) elif instance.status == "skipped": ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped") result = ET.tostring(eleTestsuites) with open(filename, 'wb') as report: report.write(result) def csv_report(self, filename): with open(filename, "wt") as csvfile: fieldnames = ["test", "arch", "platform", "status", "extra_args", "handler", "handler_time", "ram_size", "rom_size"] cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) cw.writeheader() for instance in self.instances.values(): rowdict = {"test": instance.testcase.name, "arch": instance.platform.arch, "platform": instance.platform.name, "extra_args": " ".join(instance.testcase.extra_args), "handler": instance.platform.simulation} rowdict["status"] = instance.status if instance.status not in ["failed", "timeout"]: if instance.handler: rowdict["handler_time"] = instance.metrics.get("handler_time", 0) ram_size = instance.metrics.get("ram_size", 0) rom_size = instance.metrics.get("rom_size", 0) rowdict["ram_size"] = ram_size rowdict["rom_size"] = rom_size cw.writerow(rowdict) def get_testcase(self, identifier): results = [] for _, tc in self.testcases.items(): for case in tc.cases: if case == identifier: results.append(tc) return results class CoverageTool: """ Base class for every supported coverage tool """ def __init__(self): self.gcov_tool = None self.base_dir = None @staticmethod def factory(tool): if tool == 'lcov': t = Lcov() elif tool == 'gcovr': t = Lcov() else: logger.error("Unsupported coverage tool specified: {}".format(tool)) return None return t @staticmethod def retrieve_gcov_data(intput_file): logger.debug("Working on %s" % intput_file) 
extracted_coverage_info = {} capture_data = False capture_complete = False with open(intput_file, 'r') as fp: for line in fp.readlines(): if re.search("GCOV_COVERAGE_DUMP_START", line): capture_data = True continue if re.search("GCOV_COVERAGE_DUMP_END", line): capture_complete = True break # Loop until the coverage data is found. if not capture_data: continue if line.startswith("*"): sp = line.split("<") if len(sp) > 1: # Remove the leading delimiter "*" file_name = sp[0][1:] # Remove the trailing new line char hex_dump = sp[1][:-1] else: continue else: continue extracted_coverage_info.update({file_name: hex_dump}) if not capture_data: capture_complete = True return {'complete': capture_complete, 'data': extracted_coverage_info} @staticmethod def create_gcda_files(extracted_coverage_info): logger.debug("Generating gcda files") for filename, hexdump_val in extracted_coverage_info.items(): # if kobject_hash is given for coverage gcovr fails # hence skipping it problem only in gcovr v4.1 if "kobject_hash" in filename: filename = (filename[:-4]) + "gcno" try: os.remove(filename) except Exception: pass continue with open(filename, 'wb') as fp: fp.write(bytes.fromhex(hexdump_val)) def generate(self, outdir): for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True): gcov_data = self.__class__.retrieve_gcov_data(filename) capture_complete = gcov_data['complete'] extracted_coverage_info = gcov_data['data'] if capture_complete: self.__class__.create_gcda_files(extracted_coverage_info) logger.debug("Gcov data captured: {}".format(filename)) else: logger.error("Gcov data capture incomplete: {}".format(filename)) with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog: ret = self._generate(outdir, coveragelog) if ret == 0: logger.info("HTML report generated: {}".format( os.path.join(outdir, "coverage", "index.html"))) class Lcov(CoverageTool): def __init__(self): super().__init__() self.ignores = [] def add_ignore_file(self, pattern): 
        # Lcov ignore patterns are shell-style globs.
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture lcov data from *outdir* and render an HTML report.

        Returns the genhtml exit status (0 on success).
        """
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
                         "--capture", "--directory", outdir,
                         "--rc", "lcov_branch_coverage=1",
                         "--output-file", coveragefile], stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file", coveragefile,
                 "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)


class Gcovr(CoverageTool):

    def __init__(self):
        super().__init__()
        self.ignores = []

    def add_ignore_file(self, pattern):
        # Gcovr ignore patterns are regular expressions.
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, list):
        # [(p, a), (p, b)] flattened -> [p, a, p, b]; used to repeat a
        # command-line flag before every item.
        tuple_list = [(prefix, item) for item in list]
        return [item for sublist in tuple_list for item in sublist]

    def _generate(self, outdir, coveragelog):
        """Capture gcovr JSON data from *outdir* and render an HTML report.

        Returns the final gcovr exit status (0 on success).
        """
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")

        excludes = Gcovr._interleave_list("-e", self.ignores)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-e", "tests/*"] + excludes +
                        ["--json", "-o", coveragefile, outdir],
                        stdout=coveragelog)

        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)

        # Merge the captured tracefiles into one HTML report.
        tracefiles = self._interleave_list("--add-tracefile", files)

        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)


class HardwareMap:
    # Schema used to validate hardware-map YAML files.
    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")

    # USB manufacturers we recognize as debug probes / dev boards.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Maps a west runner to the USB product strings (literal or regex)
    # it can drive.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        # Devices found by scan_hw() vs. devices declared usable for runs.
        self.detected = []
        self.connected_hardware = []

    def load_device_from_cmdline(self, serial, platform):
        # Register a single device specified on the command line.
        device = {
            "serial": serial,
            "platform": platform,
            "counter": 0,
            "available": True,
            "connected": True
        }
        self.connected_hardware.append(device)

    def load_hardware_map(self, map_file):
        """Load and schema-validate a hardware map YAML file, resetting
        each device's usage counter."""
        hwm_schema = scl.yaml_load(self.schema_path)
        self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
        for i in self.connected_hardware:
            i['counter'] = 0

    def scan_hw(self, persistent=False):
        """Enumerate connected serial devices and populate self.detected."""
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':
            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.
            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:

                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = {}
                s_dev['platform'] = "unknown"
                s_dev['id'] = d.serial_number
                s_dev['serial'] = persistent_map.get(d.device, d.device)
                s_dev['product'] = d.product
                s_dev['runner'] = 'unknown'
                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev['runner'] = runner
                        continue
                    # Try regex matching
                    # NOTE(review): after an exact match the loop `continue`s
                    # to the next runner rather than stopping — later runners
                    # can still overwrite s_dev['runner']; confirm intended.
                    for p in products:
                        if re.match(p, d.product):
                            s_dev['runner'] = runner

                s_dev['available'] = True
                s_dev['connected'] = True
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def write_map(self, hwm_file):
        """Merge newly detected devices into *hwm_file* (YAML), marking
        previously known devices connected/disconnected as appropriate."""
        # use existing map
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
                # disconnect everything
                for h in hwm:
                    h['connected'] = False
                    h['serial'] = None

                # Re-connect entries that match a currently detected device.
                for d in self.detected:
                    for h in hwm:
                        if d['id'] == h['id'] and d['product'] == h['product']:
                            h['connected'] = True
                            h['serial'] = d['serial']
                            d['match'] = True

                # Anything detected but unmatched is appended as new.
                new = list(filter(lambda n: not n.get('match', False), self.detected))
                hwm = hwm + new

                logger.info("Registered devices:")
                self.dump(hwm)

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, default_flow_style=False)

        else:
            # create new file
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(self.detected, yaml_file, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(self.detected)

    @staticmethod
    def dump(hwmap=[], filtered=[], header=[], connected_only=False):
        # Print a table of devices.
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only because they are never mutated in place.
        print("")
        table = []
        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in sorted(hwmap, key=lambda i: i['platform']):
            platform = p.get('platform')
            connected = p.get('connected', False)
            if filtered and platform not in filtered:
                continue
            if not connected_only or connected:
                table.append([platform, p.get('id', None), p.get('serial')])

        print(tabulate(table, headers=header, tablefmt="github"))


def size_report(sc):
    """Log a section-by-section size table for one size-calculator object."""
    logger.info(sc.filename)
    logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
    for i in range(len(sc.sections)):
        v = sc.sections[i]
        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"],
                     v["size"], v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")


def export_tests(filename, tests):
    """Export dotted test identifiers ("section.sub_section") as CSV rows;
    identifiers without a dot cannot be exported and are only logged."""
    with open(filename, "wt") as csvfile:
        fieldnames = ['section', 'subsection', 'title', 'reference']
        cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        for test in tests:
            data = test.split(".")
            if len(data) > 1:
                subsec = " ".join(data[1].split("_")).title()
                rowdict = {
                    "section": data[0].capitalize(),
                    "subsection": subsec,
                    "title": test,
                    "reference": test
                }
                cw.writerow(rowdict)
            else:
                logger.info("{} can't be exported".format(test))
# --- file: 02_hisdata.py ---
import os
import struct
import pandas as pd
import numpy as np
import time, datetime
import multiprocessing


def readTdxLdayFile(fname="data/sh000001.day"):
    """Parse one TDX (通信达) daily-bar .day file into a DataFrame.

    Each record is 32 bytes: date, open, high, low, close (prices stored
    as integer hundredths), amount (float), volume, plus one unused field.
    Returns (stock_code, DataFrame indexed by calendar date).
    """
    dataSet = []
    with open(fname, 'rb') as fl:
        buffer = fl.read()  # read the whole file into memory
    size = len(buffer)
    rowSize = 32  # one record per 32 bytes
    code = os.path.basename(fname).replace('.day', '')
    for i in range(0, size, rowSize):  # walk the buffer in 32-byte steps
        row = list(struct.unpack('IIIIIfII', buffer[i:i + rowSize]))
        # Prices are stored as integer hundredths.
        row[1] = row[1] / 100
        row[2] = row[2] / 100
        row[3] = row[3] / 100
        row[4] = row[4] / 100
        row.pop()  # drop the meaningless trailing field
        row.insert(0, code)
        dataSet.append(row)
    data = pd.DataFrame(data=dataSet,
                        columns=['code', 'tradeDate', 'open', 'high', 'low',
                                 'close', 'amount', 'vol'])
    data['date'] = data['tradeDate'].apply(
        lambda x: datetime.datetime.strptime(str(x), '%Y%m%d').date())
    data = data.set_index(['date'])
    return code, data


def recalc(code, data):
    """Fill non-trading days by repeating the latest trading day's row,
    then write the result to <code>.csv.

    Returns the filled, sorted DataFrame (None when *data* is empty).
    """
    if data.size == 0:
        return
    beg_date = data.index[0]
    today = datetime.datetime.today().date()
    ds = None
    filled = []
    while beg_date <= today:
        if beg_date in data.index:
            ds = data.loc[beg_date]
        else:
            # Gap day: repeat the most recent trading row under this date.
            # (beg_date starts at data.index[0], so ds is always set first.)
            ds = ds.copy()
            ds.name = beg_date
            filled.append(ds)
        beg_date = beg_date + datetime.timedelta(1)
    if filled:
        # DataFrame.append() was removed in pandas 2.0; concat instead.
        data = pd.concat([data, pd.DataFrame(filled)])
    data = data.sort_index()
    data.to_csv('%s.csv' % code)
    return data


def asyncCalc(fname, queue):
    """Worker: parse one .day file and recalculate/export it."""
    code, df = readTdxLdayFile(fname)
    recalc(code, df)


def readPath(path):
    """Process every .day file under *path*, one worker process per file."""
    q = multiprocessing.Queue()
    jobs = []
    output = pd.DataFrame()
    for name in os.listdir(path):
        fname = os.path.join(path, name)
        if os.path.isdir(fname):
            continue
        # BUG FIX: the original also queued pool.apply_async(asyncCalc,
        # args=(fname)) — args was not a tuple and the queue argument was
        # missing, so every pool task failed silently and duplicated the
        # working Process-based path below. The broken pool is removed.
        p = multiprocessing.Process(target=asyncCalc, args=(fname, q))
        jobs.append(p)
        p.start()
    for p in jobs:
        p.join()
    return output


if __name__ == "__main__":
    # Guarding the entry point is required for spawn-based multiprocessing
    # and keeps the module importable without side effects.
    readPath('data')  # 读取目录下面的所有文件 (process all files in the directory)
# --- file: model_parallel_thread.py ---
import torch
import torch.nn as nn
import torch.optim as optim
import threading
import torch.multiprocessing as mp
import concurrent.futures


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False,
                     dilation=dilation)


class BasicBlock(nn.Module):
    # Residual block: two 3x3 convs plus an identity shortcut.
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise Exception('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise Exception("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        # Residual connection, then final activation.
        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4 channels).
    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """Torchvision-style ResNet assembled from *block* repeated per *layers*."""

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise Exception("replace_stride_with_dilation should be None "
                            "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = \
            nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack *blocks* residual blocks; the first one may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,
                            self.groups, self.base_width, previous_dilation,
                            norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width,
                                dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x):
        return self._forward_impl(x)


def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    # NOTE(review): pretrained/progress are accepted but ignored in this
    # stripped-down copy — no weights are downloaded.
    model = ResNet(block, layers, **kwargs)
    return model


def resnet50(pretrained=False, progress=True, **kwargs):
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


#################### DL CODE ################
num_classes = 1000


class ModelParallelResNet50(ResNet):
    # ResNet-50 split across two GPUs: stem + layer1/2 on cuda:0,
    # layer3/4 + head on cuda:1.
    def __init__(self, *args, **kwargs):
        super(ModelParallelResNet50, self).__init__(
            Bottleneck, [3, 4, 6, 3], num_classes=num_classes, *args, **kwargs)

        self.seq1 = nn.Sequential(
            self.conv1,
            self.bn1,
            self.relu,
            self.maxpool,
            self.layer1,
            self.layer2
        ).to('cuda:0')

        self.seq2 = nn.Sequential(
            self.layer3,
            self.layer4,
            self.avgpool,
        ).to('cuda:1')

        self.fc.to('cuda:1')

    def forward(self, x):
        x = self.seq2(self.seq1(x).to('cuda:1'))
        return self.fc(x.view(x.size(0), -1))


class PipelineParallelResNet50(ModelParallelResNet50):
    # Pipelined variant: the batch is split into micro-batches so stage 1
    # (cuda:0) and stage 2 (cuda:1) can overlap.
    def __init__(self, split_size=20, *args, **kwargs):
        super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
        self.split_size = split_size

    def taskA(self, s_prev, ret):
        # Stage 2: finish the previous micro-batch on cuda:1.
        s_prev = self.seq2(s_prev)
        ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))

    def taskB(self, s_next):
        # Stage 1: run the next micro-batch on cuda:0, move result to cuda:1.
        s_prev = self.seq1(s_next).to('cuda:1')
        return s_prev

    def forward(self, x):
        splits = iter(x.split(self.split_size, dim=0))
        s_next = next(splits)
        s_prev = self.seq1(s_next).to('cuda:1')
        ret = []

        for s_next in splits:
            # A. s_prev runs on cuda:1
            # self.taskA(s_prev=s_prev, ret=ret)
            # NOTE(review): creating a fresh ThreadPoolExecutor and calling
            # .result() immediately serializes A before B — presumably the
            # intent was to overlap them; confirm before expecting speedup.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                futureA = executor.submit(self.taskA, s_prev, ret)
                futureA.result()
            # x = threading.Thread(target=self.taskA, args=(s_prev, ret))
            # x.start()
            # p = mp.Process(target=self.taskA, args=(s_prev, ret))
            # p.start()
            # B.
            # (cont.) s_next runs on cuda:0, which can run concurrently with A
            # y = threading.Thread(target=self.taskB, args=(s_next))
            with concurrent.futures.ThreadPoolExecutor() as executor:
                futureB = executor.submit(self.taskB, s_next)
                s_prev = futureB.result()
            # y.start()
            # x.join()

        # Flush the last micro-batch through stage 2 and the classifier.
        s_prev = self.seq2(s_prev)
        ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))

        return torch.cat(ret)


num_batches = 1
batch_size = 120
image_w = 128
image_h = 128


def train(model):
    """One dummy training pass: random images, random one-hot labels."""
    model.train(True)
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001)

    one_hot_indices = torch.LongTensor(batch_size) \
        .random_(0, num_classes) \
        .view(batch_size, 1)

    for _ in range(num_batches):
        # generate random inputs and labels
        inputs = torch.randn(batch_size, 3, image_w, image_h)
        labels = torch.zeros(batch_size, num_classes) \
            .scatter_(1, one_hot_indices, 1)

        # run forward pass
        optimizer.zero_grad()
        outputs = model(inputs.to('cuda:0'))
        # print("Output-device {}".format(outputs.device))

        # run backward pass
        labels = labels.to(outputs.device)
        loss_fn(outputs, labels).backward()
        optimizer.step()


#########
print("Running Model Parallel Resnet50")

import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import numpy as np
import timeit

num_repeat = 3

stmt = "train(model.share_memory())"

setup = "model = ModelParallelResNet50()"
# globals arg is only available in Python 3. In Python 2, use the following
# import __builtin__
# __builtin__.__dict__.update(locals())
mp_run_times = timeit.repeat(
    stmt, setup, number=1, repeat=num_repeat, globals=globals())
mp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times)

setup = "model = resnet50(num_classes=num_classes).to('cuda:0')"
rn_run_times = timeit.repeat(
    stmt, setup, number=1, repeat=num_repeat, globals=globals())
rn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)


def plot(means, stds, labels, fig_name):
    """Bar chart of mean run times with std-dev error bars, saved to disk."""
    fig, ax = plt.subplots()
    ax.bar(np.arange(len(means)), means, yerr=stds,
           align='center', alpha=0.5, ecolor='red', capsize=10, width=0.6)
    ax.set_ylabel('ResNet50 Execution Time (Second)')
    ax.set_xticks(np.arange(len(means)))
    ax.set_xticklabels(labels)
    ax.yaxis.grid(True)
    plt.tight_layout()
    plt.savefig(fig_name)
    plt.close(fig)


# plot([mp_mean, rn_mean],
#      [mp_std, rn_std],
#      ['Model Parallel', 'Single GPU'],
#      'mp_vs_rn.png')

########### Pipeline Parallel ################
print("Running Pipeline Parallel ResNet50 Once for Split 20")
setup = "model = PipelineParallelResNet50()"
pp_run_times = timeit.repeat(
    stmt, setup, number=1, repeat=num_repeat, globals=globals())
pp_mean, pp_std = np.mean(pp_run_times), np.std(pp_run_times)

# plot([mp_mean, rn_mean, pp_mean],
#      [mp_std, rn_std, pp_std],
#      ['Model Parallel', 'Single GPU', 'Pipelining Model Parallel'],
#      'mp_vs_rn_vs_pp.png')

##### Variable Split Sizes for Batch #####
print("Running Pipeline Parallel ResNet50 for multiple split sizes")
means = []
stds = []
split_sizes = [1, 2, 4, 5, 10, 20, 50, 100]
# NOTE(review): the assignment below overwrites the list above — the first
# list is dead code; confirm which sweep was intended.
split_sizes = [1, 3, 5, 8, 10, 12, 15, 20, 30, 40, 60]

for split_size in split_sizes:
    print("Split Size {}".format(split_size))
    setup = "model = PipelineParallelResNet50(split_size=%d)" % split_size
    pp_run_times = timeit.repeat(
        stmt, setup, number=1, repeat=num_repeat, globals=globals())
    means.append(np.mean(pp_run_times))
    stds.append(np.std(pp_run_times))

###########################################
print("Model Parallel Mean {}, Single Node Mean{}, Pipeline Mean {} ".format(mp_mean, rn_mean, pp_mean))
print("Pipeline Variables : {}".format(means))
# --- file: primes_queue_less_work.py ---
import math
import time
import multiprocessing
from multiprocessing import Pool

# Sentinel messages used on the queues.  FLAG_ALL_DONE tells a worker the
# producer has no more candidates; FLAG_WORKER_FINISHED_PROCESSING tells the
# consumer a worker has drained its results and exited.
FLAG_ALL_DONE = b"WORK_FINISHED"
FLAG_WORKER_FINISHED_PROCESSING = b"WORKER_FINISHED_PROCESSING"


def check_prime(possible_primes_queue, definite_primes_queue):
    """Consume odd candidates from `possible_primes_queue`, pushing every
    prime found onto `definite_primes_queue`.

    Runs until the FLAG_ALL_DONE sentinel is received, at which point a
    FLAG_WORKER_FINISHED_PROCESSING sentinel is emitted and the loop exits.
    """
    while True:
        n = possible_primes_queue.get()
        if n == FLAG_ALL_DONE:
            # Flag that our results have all been pushed to the results queue.
            definite_primes_queue.put(FLAG_WORKER_FINISHED_PROCESSING)
            break
        else:
            assert n % 2 != 0  # this version receives no even candidates
            # Trial division by odd factors up to sqrt(n); the for/else
            # `else` branch fires only when no divisor was found.
            for i in range(3, int(math.sqrt(n)) + 1, 2):
                if n % i == 0:
                    break
            else:
                definite_primes_queue.put(n)


if __name__ == "__main__":
    primes = []

    manager = multiprocessing.Manager()
    possible_primes_queue = manager.Queue()
    # We could limit the input queue size with e.g. `maxsize=3`
    definite_primes_queue = manager.Queue()

    NBR_PROCESSES = 8
    # FIX(review): the original also built `Pool(processes=NBR_PROCESSES)`
    # here but never used it (workers are explicit Process objects below);
    # the dead pool has been removed.
    processes = []
    for _ in range(NBR_PROCESSES):
        p = multiprocessing.Process(
            target=check_prime,
            args=(possible_primes_queue, definite_primes_queue))
        processes.append(p)
        p.start()

    t1 = time.time()

    #number_range = range(100000000, 100010000)  # A
    #number_range = range(100000001, 100100000, 2)  # B2
    number_range = range(100000001, 101000000, 2)  # C2
    #number_range = range(1000000000, 1000100000)  # D
    #number_range = range(100000000000, 100000100000)  # E

    for possible_prime in number_range:
        possible_primes_queue.put(possible_prime)

    print("ALL JOBS ADDED TO THE QUEUE")

    # add poison pills to stop the remote workers
    for n in range(NBR_PROCESSES):
        possible_primes_queue.put(FLAG_ALL_DONE)

    print("NOW WAITING FOR RESULTS...")

    processors_indicating_they_have_finished = 0
    while True:
        new_result = definite_primes_queue.get()  # block whilst waiting for results
        if new_result == FLAG_WORKER_FINISHED_PROCESSING:
            print("WORKER {} HAS JUST FINISHED".format(
                processors_indicating_they_have_finished))
            processors_indicating_they_have_finished += 1
            if processors_indicating_they_have_finished == NBR_PROCESSES:
                break
        else:
            primes.append(new_result)
    assert processors_indicating_they_have_finished == NBR_PROCESSES

    # FIX(review): the original never joined the workers; reap them so no
    # child processes are left behind when reporting results.
    for p in processes:
        p.join()

    print("Took:", time.time() - t1)
    print(len(primes), primes[:10], primes[-10:])
test_postgresql.py
import mock # for the mock.call method, importing it without a namespace breaks python3 import os import psycopg2 import re import subprocess import time from mock import Mock, MagicMock, PropertyMock, patch, mock_open from patroni.async_executor import CriticalTask from patroni.dcs import Cluster, ClusterConfig, Member, RemoteMember, SyncState from patroni.exceptions import PostgresConnectionException, PatroniException from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE from patroni.postgresql.postmaster import PostmasterProcess from patroni.postgresql.slots import SlotsHandler from patroni.utils import RetryFailedError from six.moves import builtins from threading import Thread, current_thread from . import BaseTestPostgresql, MockCursor, MockPostmaster, psycopg2_connect mtime_ret = {} def mock_mtime(filename): if filename not in mtime_ret: mtime_ret[filename] = time.time() else: mtime_ret[filename] += 1 return mtime_ret[filename] def pg_controldata_string(*args, **kwargs): return b""" pg_control version number: 942 Catalog version number: 201509161 Database system identifier: 6200971513092291716 Database cluster state: shut down in recovery pg_control last modified: Fri Oct 2 10:57:06 2015 Latest checkpoint location: 0/30000C8 Prior checkpoint location: 0/2000060 Latest checkpoint's REDO location: 0/3000090 Latest checkpoint's REDO WAL file: 000000020000000000000003 Latest checkpoint's TimeLineID: 2 Latest checkpoint's PrevTimeLineID: 2 Latest checkpoint's full_page_writes: on Latest checkpoint's NextXID: 0/943 Latest checkpoint's NextOID: 24576 Latest checkpoint's NextMultiXactId: 1 Latest checkpoint's NextMultiOffset: 0 Latest checkpoint's oldestXID: 931 Latest checkpoint's oldestXID's DB: 1 Latest checkpoint's oldestActiveXID: 943 Latest checkpoint's oldestMultiXid: 1 Latest checkpoint's oldestMulti's DB: 1 Latest checkpoint's oldestCommitTs: 0 Latest checkpoint's newestCommitTs: 0 Time of latest checkpoint: Fri Oct 2 10:56:54 2015 Fake 
LSN counter for unlogged rels: 0/1 Minimum recovery ending location: 0/30241F8 Min recovery ending loc's timeline: 2 Backup start location: 0/0 Backup end location: 0/0 End-of-backup record required: no wal_level setting: hot_standby Current wal_log_hints setting: on Current max_connections setting: 100 Current max_worker_processes setting: 8 Current max_prepared_xacts setting: 0 Current max_locks_per_xact setting: 64 Current track_commit_timestamp setting: off Maximum data alignment: 8 Database block size: 8192 Blocks per segment of large relation: 131072 WAL block size: 8192 Bytes per WAL segment: 16777216 Maximum length of identifiers: 64 Maximum columns in an index: 32 Maximum size of a TOAST chunk: 1996 Size of a large-object chunk: 2048 Date/time type storage: 64-bit integers Float4 argument passing: by value Float8 argument passing: by value Data page checksum version: 0 """ @patch('subprocess.call', Mock(return_value=0)) @patch('psycopg2.connect', psycopg2_connect) class TestPostgresql(BaseTestPostgresql): @patch('subprocess.call', Mock(return_value=0)) @patch('os.rename', Mock()) @patch('patroni.postgresql.CallbackExecutor', Mock()) @patch.object(Postgresql, 'get_major_version', Mock(return_value=120000)) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) def setUp(self): super(TestPostgresql, self).setUp() self.p.config.write_postgresql_conf() self.p._callback_executor = Mock() @patch('subprocess.Popen') @patch.object(Postgresql, 'wait_for_startup') @patch.object(Postgresql, 'wait_for_port_open') @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, 'controldata', Mock()) def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen): mock_is_running.return_value = MockPostmaster() mock_wait_for_port_open.return_value = True mock_wait_for_startup.return_value = False mock_popen.return_value.stdout.readline.return_value = '123' self.assertTrue(self.p.start()) mock_is_running.return_value = None 
mock_postmaster = MockPostmaster() with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster): pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf') open(pg_conf, 'w').close() self.assertFalse(self.p.start(task=CriticalTask())) with open(pg_conf) as f: lines = f.readlines() self.assertTrue("f.oo = 'bar'\n" in lines) mock_wait_for_startup.return_value = None self.assertFalse(self.p.start(10)) self.assertIsNone(self.p.start()) mock_wait_for_port_open.return_value = False self.assertFalse(self.p.start()) task = CriticalTask() task.cancel() self.assertFalse(self.p.start(task=task)) self.p.cancellable.cancel() self.assertFalse(self.p.start()) @patch.object(Postgresql, 'pg_isready') @patch('patroni.postgresql.polling_loop', Mock(return_value=range(1))) def test_wait_for_port_open(self, mock_pg_isready): mock_pg_isready.return_value = STATE_NO_RESPONSE mock_postmaster = MockPostmaster(is_running=False) # No pid file and postmaster death self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) mock_postmaster.is_running.return_value = True # timeout self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) # pg_isready failure mock_pg_isready.return_value = 'garbage' self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1)) # cancelled self.p.cancellable.cancel() self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1)) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running') @patch.object(Postgresql, '_wait_for_connection_close', Mock()) def test_stop(self, mock_is_running): # Postmaster is not running mock_callback = Mock() mock_is_running.return_value = None self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() # Is running, stopped successfully mock_is_running.return_value = mock_postmaster = MockPostmaster() mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() mock_postmaster.signal_stop.assert_called() # Stop signal 
failed mock_postmaster.signal_stop.return_value = False self.assertFalse(self.p.stop()) # Stop signal failed to find process mock_postmaster.signal_stop.return_value = True mock_callback.reset_mock() self.assertTrue(self.p.stop(on_safepoint=mock_callback)) mock_callback.assert_called() def test_restart(self): self.p.start = Mock(return_value=False) self.assertFalse(self.p.restart()) self.assertEqual(self.p.state, 'restart failed (restarting)') @patch('os.chmod', Mock()) @patch.object(builtins, 'open', MagicMock()) def test_write_pgpass(self): self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'}) self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'}) def test_checkpoint(self): with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))): self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true') with patch.object(MockCursor, 'execute', Mock(return_value=None)): self.assertIsNone(self.p.checkpoint()) self.assertEqual(self.p.checkpoint(), 'not accessible or not healty') @patch('patroni.postgresql.config.mtime', mock_mtime) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def test_check_recovery_conf(self, mock_get_pg_settings): mock_get_pg_settings.return_value = { 'primary_conninfo': ['primary_conninfo', 'foo=', None, 'string', 'postmaster', self.p.config._auto_conf], 'recovery_min_apply_delay': ['recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup', 'foo'] } self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on'}) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = '' mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][5] = 
self.p.config._auto_conf self.assertEqual(self.p.config.check_recovery_conf(None), (True, False)) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) conninfo = {'host': '1', 'password': 'bar'} with patch('patroni.postgresql.config.ConfigHandler.primary_conninfo_params', Mock(return_value=conninfo)): mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 passfile='\ + re.sub(r'([\'\\ ])', r'\\\1', self.p.config._pgpass) mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0' self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': conninfo.copy()}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) @patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000)) @patch.object(Postgresql, 'is_running', MockPostmaster) @patch.object(MockPostmaster, 'create_time', Mock(return_value=1234567), create=True) @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings') def test__read_recovery_params(self, mock_get_pg_settings): mock_get_pg_settings.return_value = {'primary_conninfo': ['primary_conninfo', '', None, 'string', 'postmaster', self.p.config._postgresql_conf]} self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}}) self.p.config.write_postgresql_conf() self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) self.assertEqual(self.p.config.check_recovery_conf(None), (False, False)) mock_get_pg_settings.side_effect = Exception with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) 
@patch.object(Postgresql, 'major_version', PropertyMock(return_value=100000)) def test__read_recovery_params_pre_v12(self): self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}}) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) self.p.config.write_recovery_conf({'standby_mode': '\n'}) with patch('patroni.postgresql.config.mtime', mock_mtime): self.assertEqual(self.p.config.check_recovery_conf(None), (True, True)) def test_write_postgresql_and_sanitize_auto_conf(self): read_data = 'primary_conninfo = foo\nfoo = bar\n' with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'), 'w') as f: f.write(read_data) mock_read_auto = mock_open(read_data=read_data) mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '') with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])),\ patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), IOError])), patch('os.chmod', Mock()): self.p.config.write_postgresql_conf() self.p.config.write_recovery_conf({'foo': 'bar'}) self.p.config.write_postgresql_conf() @patch.object(Postgresql, 'is_running', Mock(return_value=False)) @patch.object(Postgresql, 'start', Mock()) def test_follow(self): self.p.call_nowait('on_start') m = RemoteMember('1', {'restore_command': '2', 'primary_slot_name': 'foo', 'conn_kwargs': {'host': 'bar'}}) self.p.follow(m) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) def test_sync_replication_slots(self): self.p.start() config = ClusterConfig(1, {'slots': {'test_3': {'database': 'a', 'plugin': 'b'}, 'A': 0, 'ls': 0, 'b': {'type': 'logical', 'plugin': '1'}}}, 1) cluster = Cluster(True, config, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None) with mock.patch('patroni.postgresql.Postgresql._query', 
Mock(side_effect=psycopg2.OperationalError)): self.p.slots_handler.sync_replication_slots(cluster) self.p.slots_handler.sync_replication_slots(cluster) with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')): self.p.slots_handler.sync_replication_slots(cluster) with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\ patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock: alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'}) alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'}) cluster.members.extend([alias1, alias2]) self.p.slots_handler.sync_replication_slots(cluster) self.assertEqual(errorlog_mock.call_count, 5) ca = errorlog_mock.call_args_list[0][0][1] self.assertTrue("test-3" in ca, "non matching {0}".format(ca)) self.assertTrue("test.3" in ca, "non matching {0}".format(ca)) @patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError)) def test__query(self): self.assertRaises(PostgresConnectionException, self.p._query, 'blabla') self.p._state = 'restarting' self.assertRaises(RetryFailedError, self.p._query, 'blabla') def test_query(self): self.p.query('select 1') self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError') self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla') @patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT)) def test_is_leader(self): self.assertTrue(self.p.is_leader()) self.p.reset_cluster_info_state() with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))): self.assertRaises(PostgresConnectionException, self.p.is_leader) def test_reload(self): self.assertTrue(self.p.reload()) @patch.object(Postgresql, 'is_running') def test_is_healthy(self, mock_is_running): mock_is_running.return_value = True self.assertTrue(self.p.is_healthy()) 
mock_is_running.return_value = False self.assertFalse(self.p.is_healthy()) def test_promote(self): self.p.set_role('replica') self.assertIsNone(self.p.promote(0)) self.assertTrue(self.p.promote(0)) def test_timeline_wal_position(self): self.assertEqual(self.p.timeline_wal_position(), (1, 2, 1)) Thread(target=self.p.timeline_wal_position).start() @patch.object(PostmasterProcess, 'from_pidfile') def test_is_running(self, mock_frompidfile): # Cached postmaster running mock_postmaster = self.p._postmaster_proc = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster) # Cached postmaster not running, no postmaster running mock_postmaster.is_running.return_value = False mock_frompidfile.return_value = None self.assertEqual(self.p.is_running(), None) self.assertEqual(self.p._postmaster_proc, None) # No cached postmaster, postmaster running mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster() self.assertEqual(self.p.is_running(), mock_postmaster2) self.assertEqual(self.p._postmaster_proc, mock_postmaster2) @patch('shlex.split', Mock(side_effect=OSError)) def test_call_nowait(self): self.p.set_role('replica') self.assertIsNone(self.p.call_nowait('on_start')) self.p.bootstrapping = True self.assertIsNone(self.p.call_nowait('on_start')) def test_non_existing_callback(self): self.assertFalse(self.p.call_nowait('foobar')) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) def test_is_leader_exception(self): self.p.start() self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported")) self.assertTrue(self.p.stop()) @patch('os.rename', Mock()) @patch('os.path.isdir', Mock(return_value=True)) def test_move_data_directory(self): self.p.move_data_directory() with patch('os.rename', Mock(side_effect=OSError)): self.p.move_data_directory() @patch('os.listdir', Mock(return_value=['recovery.conf'])) @patch('os.path.exists', Mock(return_value=True)) @patch.object(Postgresql, 'controldata', Mock()) def 
test_get_postgres_role_from_data_directory(self): self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica') def test_remove_data_directory(self): def _symlink(src, dst): try: os.symlink(src, dst) except OSError: if os.name == 'nt': # os.symlink under Windows needs admin rights skip it pass os.makedirs(os.path.join(self.p.data_dir, 'foo')) _symlink('foo', os.path.join(self.p.data_dir, 'pg_wal')) self.p.remove_data_directory() open(self.p.data_dir, 'w').close() self.p.remove_data_directory() _symlink('unexisting', self.p.data_dir) with patch('os.unlink', Mock(side_effect=OSError)): self.p.remove_data_directory() self.p.remove_data_directory() @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) def test_controldata(self): with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)): data = self.p.controldata() self.assertEqual(len(data), 50) self.assertEqual(data['Database cluster state'], 'shut down in recovery') self.assertEqual(data['wal_log_hints setting'], 'on') self.assertEqual(int(data['Database block size']), 8192) with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))): self.assertEqual(self.p.controldata(), {}) @patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True)) @patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string)) def test_sysid(self): self.assertEqual(self.p.sysid, "6200971513092291716") @patch('os.path.isfile', Mock(return_value=True)) @patch('shutil.copy', Mock(side_effect=IOError)) def test_save_configuration_files(self): self.p.config.save_configuration_files() @patch('os.path.isfile', Mock(side_effect=[False, True])) @patch('shutil.copy', Mock(side_effect=IOError)) def test_restore_configuration_files(self): self.p.config.restore_configuration_files() def test_can_create_replica_without_replication_connection(self): 
self.p.config._config['create_replica_method'] = [] self.assertFalse(self.p.can_create_replica_without_replication_connection()) self.p.config._config['create_replica_method'] = ['wale', 'basebackup'] self.p.config._config['wale'] = {'command': 'foo', 'no_master': 1} self.assertTrue(self.p.can_create_replica_without_replication_connection()) def test_replica_method_can_work_without_replication_connection(self): self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup')) self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar')) self.p.config._config['foo'] = {'command': 'bar', 'no_master': 1} self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo')) self.p.config._config['foo'] = {'command': 'bar'} self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo')) @patch('time.sleep', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=True)) @patch.object(MockCursor, 'fetchone') def test_reload_config(self, mock_fetchone): mock_fetchone.return_value = (1,) parameters = self._PARAMETERS.copy() parameters.pop('f.oo') parameters['wal_buffers'] = '512' config = {'pg_hba': [''], 'pg_ident': [''], 'use_unix_socket': True, 'authentication': {}, 'retry_timeout': 10, 'listen': '*', 'krbsrvname': 'postgres', 'parameters': parameters} self.p.reload_config(config) mock_fetchone.side_effect = Exception parameters['b.ar'] = 'bar' self.p.reload_config(config) parameters['autovacuum'] = 'on' self.p.reload_config(config) parameters['autovacuum'] = 'off' parameters.pop('search_path') config['listen'] = '*:5433' self.p.reload_config(config) parameters['unix_socket_directories'] = '.' 
self.p.reload_config(config) self.p.config.resolve_connection_addresses() @patch.object(Postgresql, '_version_file_exists', Mock(return_value=True)) def test_get_major_version(self): with patch.object(builtins, 'open', mock_open(read_data='9.4')): self.assertEqual(self.p.get_major_version(), 90400) with patch.object(builtins, 'open', Mock(side_effect=Exception)): self.assertEqual(self.p.get_major_version(), 0) def test_postmaster_start_time(self): with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))): self.assertEqual(self.p.postmaster_start_time(), 'foo') t = Thread(target=self.p.postmaster_start_time) t.start() t.join() with patch.object(MockCursor, "execute", side_effect=psycopg2.Error): self.assertIsNone(self.p.postmaster_start_time()) def test_check_for_startup(self): with patch('subprocess.call', return_value=0): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=1): self.p._state = 'starting' self.assertTrue(self.p.check_for_startup()) self.assertEqual(self.p.state, 'starting') with patch('subprocess.call', return_value=2): self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'start failed') with patch('subprocess.call', return_value=0): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') with patch('subprocess.call', return_value=127): self.p._state = 'running' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') self.p._state = 'starting' self.assertFalse(self.p.check_for_startup()) self.assertEqual(self.p.state, 'running') def test_wait_for_startup(self): state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0} self.__thread_ident = current_thread().ident def increment_sleeps(*args): if current_thread().ident == self.__thread_ident: print("Sleep") state['sleeps'] 
+= 1 def isready_return(*args): ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return'] print("Isready {0} {1}".format(ret, state)) return ret def time_in_state(*args): return state['sleeps'] with patch('subprocess.call', side_effect=isready_return): with patch('time.sleep', side_effect=increment_sleeps): self.p.time_in_state = Mock(side_effect=time_in_state) self.p._state = 'stopped' self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 0) self.p._state = 'starting' state['num_rejects'] = 5 self.assertTrue(self.p.wait_for_startup()) self.assertEqual(state['sleeps'], 5) self.p._state = 'starting' state['sleeps'] = 0 state['final_return'] = 2 self.assertFalse(self.p.wait_for_startup()) self.p._state = 'starting' state['sleeps'] = 0 state['final_return'] = 0 self.assertFalse(self.p.wait_for_startup(timeout=2)) self.assertEqual(state['sleeps'], 3) with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)): self.p.cancellable.cancel() self.p._state = 'starting' self.assertIsNone(self.p.wait_for_startup()) def test_pick_sync_standby(self): cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None, SyncState(0, self.me.name, self.leadermem.name), None) with patch.object(Postgresql, "query", return_value=[ (self.leadermem.name, 'streaming', 'sync'), (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True)) with patch.object(Postgresql, "query", return_value=[ (self.me.name, 'streaming', 'async'), (self.leadermem.name, 'streaming', 'potential'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False)) with patch.object(Postgresql, "query", return_value=[ (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): 
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False)) with patch.object(Postgresql, "query", return_value=[ ('missing', 'streaming', 'sync'), (self.me.name, 'streaming', 'async'), (self.other.name, 'streaming', 'async'), ]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False)) with patch.object(Postgresql, "query", return_value=[]): self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False)) def test_set_sync_standby(self): def value_in_conf(): with open(os.path.join(self.p.data_dir, 'postgresql.conf')) as f: for line in f: if line.startswith('synchronous_standby_names'): return line.strip() mock_reload = self.p.reload = Mock() self.p.config.set_synchronous_standby('n1') self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'") mock_reload.assert_called() mock_reload.reset_mock() self.p.config.set_synchronous_standby('n1') mock_reload.assert_not_called() self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'") self.p.config.set_synchronous_standby('n2') mock_reload.assert_called() self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'") mock_reload.reset_mock() self.p.config.set_synchronous_standby(None) mock_reload.assert_called() self.assertEqual(value_in_conf(), None) def test_get_server_parameters(self): config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'} self.p.config.get_server_parameters(config) config['synchronous_mode_strict'] = True self.p.config.get_server_parameters(config) self.p.config.set_synchronous_standby('foo') self.assertTrue(str(self.p.config.get_server_parameters(config)).startswith('{')) @patch('time.sleep', Mock()) def test__wait_for_connection_close(self): mock_postmaster = MockPostmaster() with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)): mock_postmaster.is_running.side_effect = [True, False, False] mock_callback = Mock() 
self.p.stop(on_safepoint=mock_callback) mock_postmaster.is_running.side_effect = [True, False, False] with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)): self.p.stop(on_safepoint=mock_callback) def test_terminate_starting_postmaster(self): mock_postmaster = MockPostmaster() self.p.terminate_starting_postmaster(mock_postmaster) mock_postmaster.signal_stop.assert_called() mock_postmaster.wait.assert_called() def test_read_postmaster_opts(self): m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \ "--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \ "--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n') with patch.object(builtins, 'open', m): data = self.p.read_postmaster_opts() self.assertEqual(data['wal_level'], 'hot_standby') self.assertEqual(int(data['max_replication_slots']), 5) self.assertEqual(data.get('D'), None) m.side_effect = IOError data = self.p.read_postmaster_opts() self.assertEqual(data, dict()) @patch('psutil.Popen') def test_single_user_mode(self, subprocess_popen_mock): subprocess_popen_mock.return_value.wait.return_value = 0 self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0) @patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']])) @patch('os.unlink', Mock(side_effect=OSError)) @patch('os.remove', Mock()) @patch('os.path.islink', Mock(side_effect=[True, False])) @patch('os.path.isfile', Mock(return_value=True)) def test_cleanup_archive_status(self): self.p.cleanup_archive_status() self.p.cleanup_archive_status() @patch('os.unlink', Mock()) @patch('os.listdir', Mock(return_value=[])) @patch('os.path.isfile', Mock(return_value=True)) @patch.object(Postgresql, 'read_postmaster_opts', Mock(return_value={})) @patch.object(Postgresql, 'single_user_mode', Mock(return_value=0)) def test_fix_cluster_state(self): self.assertTrue(self.p.fix_cluster_state()) def test_replica_cached_timeline(self): 
self.assertEqual(self.p.replica_cached_timeline(1), 2) def test_get_master_timeline(self): self.assertEqual(self.p.get_master_timeline(), 1) @patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica')) def test__build_effective_configuration(self): with patch.object(Postgresql, 'controldata', Mock(return_value={'max_connections setting': '200', 'max_worker_processes setting': '20', 'max_prepared_xacts setting': '100', 'max_locks_per_xact setting': '100', 'max_wal_senders setting': 10})): self.p.cancellable.cancel() self.assertFalse(self.p.start()) self.assertTrue(self.p.pending_restart) @patch('os.path.exists', Mock(return_value=True)) @patch('os.path.isfile', Mock(return_value=False)) def test_pgpass_is_dir(self): self.assertRaises(PatroniException, self.setUp)
PyV8.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import print_function

import sys, os, re
import logging
import collections

# Python 2/3 compatibility shims: unify the thread module, StringIO, and
# the text type / input builtin under one set of names.
is_py3k = sys.version_info[0] > 2

if is_py3k:
    import _thread as thread
    from io import StringIO

    unicode = str
    raw_input = input
else:
    import thread

    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

# json is stdlib from 2.6; fall back to simplejson on older interpreters.
try:
    import json
except ImportError:
    import simplejson as json

import _PyV8  # the C++ extension module wrapping the V8 engine

__author__ = 'Flier Lu <flier.lu@gmail.com>'
__version__ = '1.0'

__all__ = ["ReadOnly", "DontEnum", "DontDelete", "Internal",
           "JSError", "JSObject", "JSArray", "JSFunction",
           "JSClass", "JSEngine", "JSContext",
           "JSObjectSpace", "JSAllocationAction",
           "JSStackTrace", "JSStackFrame", "profiler",
           "JSExtension", "JSLocker", "JSUnlocker", "AST"]


class JSAttribute(object):
    """Decorator factory that tags a callable with a ``__<name>__`` flag.

    The flag attributes (e.g. ``__readonly__``) are presumably read by the
    _PyV8 bridge when exposing Python members to JavaScript — TODO confirm
    against the C++ side.
    """
    def __init__(self, name):
        self.name = name

    def __call__(self, func):
        # Mark the function; the decorator returns it unchanged otherwise.
        setattr(func, "__%s__" % self.name, True)

        return func


# Ready-made attribute decorators mirroring V8 property attributes.
ReadOnly = JSAttribute(name='readonly')
DontEnum = JSAttribute(name='dontenum')
DontDelete = JSAttribute(name='dontdel')
Internal = JSAttribute(name='internal')


class JSError(Exception):
    """Python-side wrapper for a JavaScript error raised inside V8.

    Wraps the implementation object supplied by _PyV8 and transparently
    proxies attribute access to it.
    """
    def __init__(self, impl):
        Exception.__init__(self)

        self._impl = impl

    def __str__(self):
        return str(self._impl)

    def __unicode__(self, *args, **kwargs):
        return unicode(self._impl)

    def __getattribute__(self, attr):
        # Delegate attribute lookups to the wrapped implementation first,
        # falling back to this wrapper's own attributes.
        impl = super(JSError, self).__getattribute__("_impl")

        try:
            return getattr(impl, attr)
        except AttributeError:
            return super(JSError, self).__getattribute__(attr)

    # Patterns for the three frame shapes found in a V8 stack trace:
    # "at [new] func (file:row:col)", "at [new] func (file)", "at file:row:col".
    RE_FRAME = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?\)")
    RE_FUNC = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^\)]+)\)")
    RE_FILE = re.compile(r"\s+at\s(?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?")

    @staticmethod
    def parse_stack(value):
        """Parse a V8 stack-trace string into (func, file, row, col) tuples.

        The first line (the error message itself) is skipped; each remaining
        line must match one of the RE_* patterns above, with missing fields
        reported as None.
        """
        stack = []

        def int_or_nul(value):
            return int(value) if value else None

        for line in value.split('\n')[1:]:
            m = JSError.RE_FRAME.match(line)

            if m:
                stack.append((m.group('func'), m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
                continue

            m = JSError.RE_FUNC.match(line)

            if m:
                stack.append((m.group('func'), m.group('file'), None, None))
                continue

            m = JSError.RE_FILE.match(line)

            if m:
                stack.append((None, m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
                continue

            # Only an empty trailing line is expected to fall through.
            assert line

        return stack

    @property
    def frames(self):
        """Parsed stack frames; `stackTrace` is proxied from the impl object."""
        return self.parse_stack(self.stackTrace)


# Register this wrapper as the Python class used for V8-raised errors.
_PyV8._JSError._jsclass = JSError

# Re-export the core V8 object wrappers from the C extension.
JSObject = _PyV8.JSObject
JSArray = _PyV8.JSArray
JSFunction = _PyV8.JSFunction

# contribute by e.generalov
JS_ESCAPABLE = re.compile(r'([^\x00-\x7f])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')


def _js_escape_unicode_re_callack(match):
    """re.sub callback: escape one non-ASCII char as \\uXXXX (or a pair)."""
    n = ord(match.group(0))
    if n < 0x10000:
        return '\\u%04x' % (n,)
    else:
        # surrogate pair
        n -= 0x10000
        s1 = 0xd800 | ((n >> 10) & 0x3ff)
        s2 = 0xdc00 | (n & 0x3ff)
        return '\\u%04x\\u%04x' % (s1, s2)


def js_escape_unicode(text):
    """Return an ASCII-only representation of a JavaScript string"""
    # NOTE(review): the str branch assumes Python 2 semantics — on Python 3
    # `str` has no .decode(), so non-ASCII py3 strings would raise here if
    # HAS_UTF8 matched; presumably only exercised on Python 2 — confirm.
    if isinstance(text, str):
        if HAS_UTF8.search(text) is None:
            return text

        text = text.decode('UTF-8')

    return str(JS_ESCAPABLE.sub(_js_escape_unicode_re_callack, text))


class JSExtension(_PyV8.JSExtension):
    """Thin wrapper that ASCII-escapes the extension name and source before
    handing them to the C++ JSExtension constructor."""
    # NOTE(review): mutable default `dependencies=[]` is shared across calls;
    # harmless only if _PyV8 never mutates it — confirm.
    def __init__(self, name, source, callback=None, dependencies=[], register=True):
        _PyV8.JSExtension.__init__(self, js_escape_unicode(name), js_escape_unicode(source), callback, dependencies, register)


def func_apply(self, thisArg, argArray=[]):
    """Emulate JavaScript Function.prototype.apply for wrapped functions.

    A non-JSObject `thisArg` is round-tripped through JSON into the current
    context to obtain a JS object to bind as `this`.
    """
    if isinstance(thisArg, JSObject):
        return self.invoke(thisArg, argArray)

    this = JSContext.current.eval("(%s)" % json.dumps(thisArg))

    return self.invoke(this, argArray)


# Monkey-patch apply() onto the C-level JSFunction wrapper.
JSFunction.apply = func_apply


class JSLocker(_PyV8.JSLocker):
    """Context manager enforcing lock-before-enter / leave-before-unlock
    ordering relative to JSContext."""
    def __enter__(self):
        self.enter()

        # Acquiring the lock while already inside a context is an ordering
        # error; release it again before raising.
        if JSContext.entered:
            self.leave()
            raise RuntimeError("Lock should be acquired before enter the context")

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if JSContext.entered:
            self.leave()
            raise RuntimeError("Lock should be released after leave the context")
self.leave() if is_py3k: def __bool__(self): return self.entered() else: def __nonzero__(self): return self.entered() class JSUnlocker(_PyV8.JSUnlocker): def __enter__(self): self.enter() return self def __exit__(self, exc_type, exc_value, traceback): self.leave() if is_py3k: def __bool__(self): return self.entered() else: def __nonzero__(self): return self.entered() class JSClass(object): __properties__ = {} __watchpoints__ = {} def __getattr__(self, name): if name == 'constructor': return JSClassConstructor(self.__class__) if name == 'prototype': return JSClassPrototype(self.__class__) prop = self.__dict__.setdefault('__properties__', {}).get(name, None) if prop and isinstance(prop[0], collections.Callable): return prop[0]() raise AttributeError(name) def __setattr__(self, name, value): prop = self.__dict__.setdefault('__properties__', {}).get(name, None) if prop and isinstance(prop[1], collections.Callable): return prop[1](value) return object.__setattr__(self, name, value) def toString(self): "Returns a string representation of an object." return "[object %s]" % self.__class__.__name__ def toLocaleString(self): "Returns a value as a string value appropriate to the host environment's current locale." return self.toString() def valueOf(self): "Returns the primitive value of the specified object." return self def hasOwnProperty(self, name): "Returns a Boolean value indicating whether an object has a property with the specified name." return hasattr(self, name) def isPrototypeOf(self, obj): "Returns a Boolean value indicating whether an object exists in the prototype chain of another object." raise NotImplementedError() def __defineGetter__(self, name, getter): "Binds an object's property to a function to be called when that property is looked up." self.__properties__[name] = (getter, self.__lookupSetter__(name)) def __lookupGetter__(self, name): "Return the function bound as a getter to the specified property." 
return self.__properties__.get(name, (None, None))[0] def __defineSetter__(self, name, setter): "Binds an object's property to a function to be called when an attempt is made to set that property." self.__properties__[name] = (self.__lookupGetter__(name), setter) def __lookupSetter__(self, name): "Return the function bound as a setter to the specified property." return self.__properties__.get(name, (None, None))[1] def watch(self, prop, handler): "Watches for a property to be assigned a value and runs a function when that occurs." self.__watchpoints__[prop] = handler def unwatch(self, prop): "Removes a watchpoint set with the watch method." del self.__watchpoints__[prop] class JSClassConstructor(JSClass): def __init__(self, cls): self.cls = cls @property def name(self): return self.cls.__name__ def toString(self): return "function %s() {\n [native code]\n}" % self.name def __call__(self, *args, **kwds): return self.cls(*args, **kwds) class JSClassPrototype(JSClass): def __init__(self, cls): self.cls = cls @property def constructor(self): return JSClassConstructor(self.cls) @property def name(self): return self.cls.__name__ class JSDebugProtocol(object): """ Support the V8 debugger JSON based protocol. 
    <http://code.google.com/p/v8/wiki/DebuggerProtocol>
    """
    class Packet(object):
        """One protocol packet; wraps the decoded JSON payload."""

        REQUEST = 'request'
        RESPONSE = 'response'
        EVENT = 'event'

        def __init__(self, payload):
            # Accept either a JSON string or an already-decoded mapping.
            self.data = json.loads(payload) if type(payload) in [str, unicode] else payload

        @property
        def seq(self):
            return self.data['seq']

        @property
        def type(self):
            return self.data['type']

    class Request(Packet):
        """A 'request' packet sent to the debugger."""

        @property
        def cmd(self):
            return self.data['command']

        @property
        def args(self):
            return self.data['args']

    class Response(Packet):
        """A 'response' packet answering a previous request."""

        @property
        def request_seq(self):
            return self.data['request_seq']

        @property
        def cmd(self):
            return self.data['command']

        @property
        def body(self):
            return self.data['body']

        @property
        def running(self):
            return self.data['running']

        @property
        def success(self):
            return self.data['success']

        @property
        def message(self):
            return self.data['message']

    class Event(Packet):
        """An unsolicited 'event' packet emitted by the debugger."""

        @property
        def event(self):
            return self.data['event']

        @property
        def body(self):
            return self.data['body']

    def __init__(self):
        # Monotonic sequence number for outgoing request packets.
        self.seq = 0

    def nextSeq(self):
        """Return the current sequence number and advance the counter."""
        seq = self.seq
        self.seq += 1

        return seq

    def parsePacket(self, payload):
        """Decode an incoming JSON payload into an Event or Response packet."""
        obj = json.loads(payload)

        return JSDebugProtocol.Event(obj) if obj['type'] == 'event' else JSDebugProtocol.Response(obj)


class JSDebugEvent(_PyV8.JSDebugEvent):
    """Pythonic wrappers around the raw V8 debug-event objects."""

    class FrameData(object):
        """Lazy (name, value) sequence over a frame's arguments or locals."""

        def __init__(self, frame, count, name, value):
            self.frame = frame
            self.count = count
            self.name = name
            self.value = value

        def __len__(self):
            return self.count(self.frame)

        def __iter__(self):
            for i in range(self.count(self.frame)):
                yield (self.name(self.frame, i), self.value(self.frame, i))

    class Frame(object):
        """Wrapper for one stack frame of the halted execution state."""

        def __init__(self, frame):
            self.frame = frame

        @property
        def index(self):
            return int(self.frame.index())

        @property
        def function(self):
            return self.frame.func()

        @property
        def receiver(self):
            return self.frame.receiver()

        @property
        def isConstructCall(self):
            return bool(self.frame.isConstructCall())

        @property
        def isDebuggerFrame(self):
            return bool(self.frame.isDebuggerFrame())

        @property
        def argumentCount(self):
            return int(self.frame.argumentCount())

        def argumentName(self, idx):
            return str(self.frame.argumentName(idx))

        def argumentValue(self, idx):
            return self.frame.argumentValue(idx)

        @property
        def arguments(self):
            # NOTE(review): ``self.argumentCount`` is a property, so its
            # *value* (an int) is passed as FrameData.count, which FrameData
            # then calls — looks broken; confirm against upstream PyV8.
            return JSDebugEvent.FrameData(self, self.argumentCount, self.argumentName, self.argumentValue)

        def localCount(self, idx):
            # NOTE(review): ``idx`` is unused here, and FrameData invokes this
            # with the frame object as the argument — verify intent upstream.
            return int(self.frame.localCount())

        def localName(self, idx):
            return str(self.frame.localName(idx))

        def localValue(self, idx):
            return self.frame.localValue(idx)

        @property
        def locals(self):
            return JSDebugEvent.FrameData(self, self.localCount, self.localName, self.localValue)

        @property
        def sourcePosition(self):
            return self.frame.sourcePosition()

        @property
        def sourceLine(self):
            return int(self.frame.sourceLine())

        @property
        def sourceColumn(self):
            return int(self.frame.sourceColumn())

        @property
        def sourceLineText(self):
            return str(self.frame.sourceLineText())

        def evaluate(self, source, disable_break = True):
            return self.frame.evaluate(source, disable_break)

        @property
        def invocationText(self):
            return str(self.frame.invocationText())

        @property
        def sourceAndPositionText(self):
            return str(self.frame.sourceAndPositionText())

        @property
        def localsText(self):
            return str(self.frame.localsText())

        def __str__(self):
            return str(self.frame.toText())

    class Frames(object):
        """Iterable view over all frames of an execution state."""

        def __init__(self, state):
            self.state = state

        def __len__(self):
            return self.state.frameCount

        def __iter__(self):
            for i in range(self.state.frameCount):
                yield self.state.frame(i)

    class State(object):
        """Wrapper around the execution state captured at a debug event."""

        def __init__(self, state):
            self.state = state

        @property
        def frameCount(self):
            return int(self.state.frameCount())

        def frame(self, idx = None):
            return JSDebugEvent.Frame(self.state.frame(idx))

        @property
        def selectedFrame(self):
            return int(self.state.selectedFrame())

        @property
        def frames(self):
            return JSDebugEvent.Frames(self)

        def __repr__(self):
            s = StringIO()

            try:
                for frame in self.frames:
                    s.write(str(frame))

                return s.getvalue()
            finally:
                s.close()

    class DebugEvent(object):
        pass

    class StateEvent(DebugEvent):
        # Cached State wrapper, built lazily from the raw event.
        __state = None

        @property
        def state(self):
            if not self.__state:
                self.__state = JSDebugEvent.State(self.event.executionState())

            return self.__state

    class BreakEvent(StateEvent):
        type = _PyV8.JSDebugEvent.Break

        def __init__(self, event):
            self.event = event

    class ExceptionEvent(StateEvent):
        type = _PyV8.JSDebugEvent.Exception

        def __init__(self, event):
            self.event = event

    class NewFunctionEvent(DebugEvent):
        type = _PyV8.JSDebugEvent.NewFunction

        def __init__(self, event):
            self.event = event

    class Script(object):
        """Wrapper for the script object attached to a compile event."""

        def __init__(self, script):
            self.script = script

        @property
        def source(self):
            return self.script.source()

        @property
        def id(self):
            return self.script.id()

        @property
        def name(self):
            return self.script.name()

        @property
        def lineOffset(self):
            return self.script.lineOffset()

        @property
        def lineCount(self):
            return self.script.lineCount()

        @property
        def columnOffset(self):
            return self.script.columnOffset()

        @property
        def type(self):
            return self.script.type()

        def __repr__(self):
            return "<%s script %s @ %d:%d> : '%s'" % (self.type, self.name,
                                                      self.lineOffset, self.columnOffset,
                                                      self.source)

    class CompileEvent(StateEvent):
        def __init__(self, event):
            self.event = event

        @property
        def script(self):
            # Built lazily and cached on first access.
            if not hasattr(self, "_script"):
                setattr(self, "_script", JSDebugEvent.Script(self.event.script()))

            return self._script

        def __str__(self):
            return str(self.script)

    class BeforeCompileEvent(CompileEvent):
        type = _PyV8.JSDebugEvent.BeforeCompile

        def __init__(self, event):
            JSDebugEvent.CompileEvent.__init__(self, event)

        def __repr__(self):
            return "before compile script: %s\n%s" % (repr(self.script), repr(self.state))

    class AfterCompileEvent(CompileEvent):
        type = _PyV8.JSDebugEvent.AfterCompile

        def __init__(self, event):
            JSDebugEvent.CompileEvent.__init__(self, event)

        def __repr__(self):
            return "after compile script: %s\n%s" % (repr(self.script), repr(self.state))

    # User-assignable callbacks; JSDebugger dispatches to these when set.
    onMessage = None
    onBreak = None
    onException = None
    onNewFunction = None
    onBeforeCompile = None
onAfterCompile = None class JSDebugger(JSDebugProtocol, JSDebugEvent): def __init__(self): JSDebugProtocol.__init__(self) JSDebugEvent.__init__(self) def __enter__(self): self.enabled = True return self def __exit__(self, exc_type, exc_value, traceback): self.enabled = False @property def context(self): if not hasattr(self, '_context'): self._context = JSContext(ctxt=_PyV8.debug().context) return self._context def isEnabled(self): return _PyV8.debug().enabled def setEnabled(self, enable): dbg = _PyV8.debug() if enable: dbg.onDebugEvent = self.onDebugEvent dbg.onDebugMessage = self.onDebugMessage dbg.onDispatchDebugMessages = self.onDispatchDebugMessages else: dbg.onDebugEvent = None dbg.onDebugMessage = None dbg.onDispatchDebugMessages = None dbg.enabled = enable enabled = property(isEnabled, setEnabled) def onDebugMessage(self, msg, data): if self.onMessage: self.onMessage(json.loads(msg)) def onDebugEvent(self, type, state, evt): if type == JSDebugEvent.Break: if self.onBreak: self.onBreak(JSDebugEvent.BreakEvent(evt)) elif type == JSDebugEvent.Exception: if self.onException: self.onException(JSDebugEvent.ExceptionEvent(evt)) elif type == JSDebugEvent.NewFunction: if self.onNewFunction: self.onNewFunction(JSDebugEvent.NewFunctionEvent(evt)) elif type == JSDebugEvent.BeforeCompile: if self.onBeforeCompile: self.onBeforeCompile(JSDebugEvent.BeforeCompileEvent(evt)) elif type == JSDebugEvent.AfterCompile: if self.onAfterCompile: self.onAfterCompile(JSDebugEvent.AfterCompileEvent(evt)) def onDispatchDebugMessages(self): return True def debugBreak(self): _PyV8.debug().debugBreak() def debugBreakForCommand(self): _PyV8.debug().debugBreakForCommand() def cancelDebugBreak(self): _PyV8.debug().cancelDebugBreak() def processDebugMessages(self): _PyV8.debug().processDebugMessages() def sendCommand(self, cmd, *args, **kwds): request = json.dumps({ 'seq': self.nextSeq(), 'type': 'request', 'command': cmd, 'arguments': kwds }) _PyV8.debug().sendCommand(request) return request 
def debugContinue(self, action='next', steps=1): return self.sendCommand('continue', stepaction=action) def stepNext(self, steps=1): """Step to the next statement in the current function.""" return self.debugContinue(action='next', steps=steps) def stepIn(self, steps=1): """Step into new functions invoked or the next statement in the current function.""" return self.debugContinue(action='in', steps=steps) def stepOut(self, steps=1): """Step out of the current function.""" return self.debugContinue(action='out', steps=steps) def stepMin(self, steps=1): """Perform a minimum step in the current function.""" return self.debugContinue(action='out', steps=steps) class JSProfiler(_PyV8.JSProfiler): @property def logs(self): pos = 0 while True: size, buf = self.getLogLines(pos) if size == 0: break for line in buf.split('\n'): yield line pos += size profiler = JSProfiler() JSObjectSpace = _PyV8.JSObjectSpace JSAllocationAction = _PyV8.JSAllocationAction class JSEngine(_PyV8.JSEngine): def __init__(self): _PyV8.JSEngine.__init__(self) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): del self JSScript = _PyV8.JSScript JSStackTrace = _PyV8.JSStackTrace JSStackTrace.Options = _PyV8.JSStackTraceOptions JSStackFrame = _PyV8.JSStackFrame class JSIsolate(_PyV8.JSIsolate): def __enter__(self): self.enter() return self def __exit__(self, exc_type, exc_value, traceback): self.leave() del self class JSContext(_PyV8.JSContext): def __init__(self, obj=None, extensions=None, ctxt=None): if JSLocker.active: self.lock = JSLocker() self.lock.enter() if ctxt: _PyV8.JSContext.__init__(self, ctxt) else: _PyV8.JSContext.__init__(self, obj, extensions or []) def __enter__(self): self.enter() return self def __exit__(self, exc_type, exc_value, traceback): self.leave() if hasattr(JSLocker, 'lock'): self.lock.leave() self.lock = None del self # contribute by marc boeker <http://code.google.com/u/marc.boeker/> def convert(obj): if type(obj) == _PyV8.JSArray: return 
[convert(v) for v in obj]

    if type(obj) == _PyV8.JSObject:
        # Recursively convert a JS object into a plain dict keyed by its
        # stringified property names (__members__ is the Python 2 spelling).
        return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in (obj.__dir__() if is_py3k else obj.__members__)])

    return obj


class AST:
    """Namespace re-exporting the V8 AST node types from _PyV8."""

    Scope = _PyV8.AstScope
    VarMode = _PyV8.AstVariableMode
    Var = _PyV8.AstVariable
    Label = _PyV8.AstLabel
    NodeType = _PyV8.AstNodeType
    Node = _PyV8.AstNode
    Statement = _PyV8.AstStatement
    Expression = _PyV8.AstExpression
    Breakable = _PyV8.AstBreakableStatement
    Block = _PyV8.AstBlock
    Declaration = _PyV8.AstDeclaration
    VariableDeclaration = _PyV8.AstVariableDeclaration
    Module = _PyV8.AstModule
    ModuleDeclaration = _PyV8.AstModuleDeclaration
    ModuleLiteral = _PyV8.AstModuleLiteral
    ModuleVariable = _PyV8.AstModuleVariable
    ModulePath = _PyV8.AstModulePath
    Iteration = _PyV8.AstIterationStatement
    DoWhile = _PyV8.AstDoWhileStatement
    While = _PyV8.AstWhileStatement
    For = _PyV8.AstForStatement
    ForIn = _PyV8.AstForInStatement
    ExpressionStatement = _PyV8.AstExpressionStatement
    Continue = _PyV8.AstContinueStatement
    Break = _PyV8.AstBreakStatement
    Return = _PyV8.AstReturnStatement
    With = _PyV8.AstWithStatement
    Case = _PyV8.AstCaseClause
    Switch = _PyV8.AstSwitchStatement
    Try = _PyV8.AstTryStatement
    TryCatch = _PyV8.AstTryCatchStatement
    TryFinally = _PyV8.AstTryFinallyStatement
    Debugger = _PyV8.AstDebuggerStatement
    Empty = _PyV8.AstEmptyStatement
    Literal = _PyV8.AstLiteral
    MaterializedLiteral = _PyV8.AstMaterializedLiteral
    PropertyKind = _PyV8.AstPropertyKind
    ObjectProperty = _PyV8.AstObjectProperty
    Object = _PyV8.AstObjectLiteral
    RegExp = _PyV8.AstRegExpLiteral
    Array = _PyV8.AstArrayLiteral
    VarProxy = _PyV8.AstVariableProxy
    Property = _PyV8.AstProperty
    Call = _PyV8.AstCall
    CallNew = _PyV8.AstCallNew
    CallRuntime = _PyV8.AstCallRuntime
    Op = _PyV8.AstOperation
    UnaryOp = _PyV8.AstUnaryOperation
    BinOp = _PyV8.AstBinaryOperation
    CountOp = _PyV8.AstCountOperation
    CompOp = _PyV8.AstCompareOperation
    Conditional = _PyV8.AstConditional
    Assignment = _PyV8.AstAssignment
    Throw = _PyV8.AstThrow
    Function = _PyV8.AstFunctionLiteral
    SharedFunction = _PyV8.AstSharedFunctionInfoLiteral
    This = _PyV8.AstThisFunction


# --- self-test section below ---
from datetime import *
import unittest
import traceback

if is_py3k:
    def toNativeString(s):
        # On Python 3 every str is already the native text type.
        return s

    def toUnicodeString(s):
        return s
else:
    def toNativeString(s, encoding='utf-8'):
        # On Python 2 the native string type is bytes.
        return s.encode(encoding) if isinstance(s, unicode) else s

    def toUnicodeString(s, encoding='utf-8'):
        return s if isinstance(s, unicode) else unicode(s, encoding)


class TestContext(unittest.TestCase):
    def testMultiNamespace(self):
        self.assertTrue(not bool(JSContext.inContext))
        self.assertTrue(not bool(JSContext.entered))

        class Global(object):
            name = "global"

        g = Global()

        with JSContext(g) as ctxt:
            self.assertTrue(bool(JSContext.inContext))
            self.assertEqual(g.name, str(JSContext.entered.locals.name))
            self.assertEqual(g.name, str(JSContext.current.locals.name))

            class Local(object):
                name = "local"

            l = Local()

            with JSContext(l):
                self.assertTrue(bool(JSContext.inContext))
                self.assertEqual(l.name, str(JSContext.entered.locals.name))
                self.assertEqual(l.name, str(JSContext.current.locals.name))

            self.assertTrue(bool(JSContext.inContext))
            self.assertEqual(g.name, str(JSContext.entered.locals.name))
            self.assertEqual(g.name, str(JSContext.current.locals.name))

        self.assertTrue(not bool(JSContext.entered))
        self.assertTrue(not bool(JSContext.inContext))

    # NOTE(review): leading underscore — disabled test, kept as-is.
    def _testMultiContext(self):
        # Create an environment
        with JSContext() as ctxt0:
            ctxt0.securityToken = "password"

            global0 = ctxt0.locals
            global0.custom = 1234

            self.assertEqual(1234, int(global0.custom))

            # Create an independent environment
            with JSContext() as ctxt1:
                ctxt1.securityToken = ctxt0.securityToken

                global1 = ctxt1.locals
                global1.custom = 1234

                with ctxt0:
                    self.assertEqual(1234, int(global0.custom))

                self.assertEqual(1234, int(global1.custom))

                # Now create a new context with the old global
                with JSContext(global1) as ctxt2:
                    ctxt2.securityToken = ctxt1.securityToken

                    with ctxt1:
                        self.assertEqual(1234, int(global1.custom))

    # NOTE(review): leading underscore — disabled test, kept as-is.
    def _testSecurityChecks(self):
        with JSContext() as env1:
            env1.securityToken = "foo"

            # Create a function in env1.
            env1.eval("spy=function(){return spy;}")

            spy = env1.locals.spy

            self.assertTrue(isinstance(spy, _PyV8.JSFunction))

            # Create another function accessing global objects.
            env1.eval("spy2=function(){return 123;}")

            spy2 = env1.locals.spy2

            self.assertTrue(isinstance(spy2, _PyV8.JSFunction))

            # Switch to env2 in the same domain and invoke spy on env2.
            env2 = JSContext()
            env2.securityToken = "foo"

            with env2:
                result = spy.apply(env2.locals)

                self.assertTrue(isinstance(result, _PyV8.JSFunction))

            env2.securityToken = "bar"

            # Call cross_domain_call, it should throw an exception
            with env2:
                self.assertRaises(JSError, spy2.apply, env2.locals)

    # NOTE(review): leading underscore — disabled test, kept as-is; the body
    # continues past the end of this chunk.
    def _testCrossDomainDelete(self):
        with JSContext() as env1:
            env2 = JSContext()

            # Set to the same domain.
            env1.securityToken = "foo"
            env2.securityToken = "foo"

            env1.locals.prop = 3

            env2.locals.env1 = env1.locals

            # Change env2 to a different domain and delete env1.prop.
            #env2.securityToken = "bar"

            self.assertEqual(3, int(env1.eval("prop")))

            with env2:
                self.assertEqual(3, int(env2.eval("this.env1.prop")))
                self.assertEqual("false", str(env2.eval("delete env1.prop")))

            # Check that env1.prop still exists.
self.assertEqual(3, int(env1.locals.prop)) class TestWrapper(unittest.TestCase): def testObject(self): with JSContext() as ctxt: o = ctxt.eval("new Object()") self.assertTrue(hash(o) > 0) o1 = o.clone() self.assertEqual(hash(o1), hash(o)) self.assertTrue(o != o1) self.assertRaises(UnboundLocalError, o.clone) def testAutoConverter(self): with JSContext() as ctxt: ctxt.eval(""" var_i = 1; var_f = 1.0; var_s = "test"; var_b = true; var_s_obj = new String("test"); var_b_obj = new Boolean(true); var_f_obj = new Number(1.5); """) vars = ctxt.locals var_i = vars.var_i self.assertTrue(var_i) self.assertEqual(1, int(var_i)) var_f = vars.var_f self.assertTrue(var_f) self.assertEqual(1.0, float(vars.var_f)) var_s = vars.var_s self.assertTrue(var_s) self.assertEqual("test", str(vars.var_s)) var_b = vars.var_b self.assertTrue(var_b) self.assertTrue(bool(var_b)) self.assertEqual("test", vars.var_s_obj) self.assertTrue(vars.var_b_obj) self.assertEqual(1.5, vars.var_f_obj) attrs = dir(ctxt.locals) self.assertTrue(attrs) self.assertTrue("var_i" in attrs) self.assertTrue("var_f" in attrs) self.assertTrue("var_s" in attrs) self.assertTrue("var_b" in attrs) self.assertTrue("var_s_obj" in attrs) self.assertTrue("var_b_obj" in attrs) self.assertTrue("var_f_obj" in attrs) def testExactConverter(self): class MyInteger(int, JSClass): pass class MyString(str, JSClass): pass class MyUnicode(unicode, JSClass): pass class MyDateTime(time, JSClass): pass class Global(JSClass): var_bool = True var_int = 1 var_float = 1.0 var_str = 'str' var_unicode = u'unicode' var_datetime = datetime.now() var_date = date.today() var_time = time() var_myint = MyInteger() var_mystr = MyString('mystr') var_myunicode = MyUnicode('myunicode') var_mytime = MyDateTime() with JSContext(Global()) as ctxt: typename = ctxt.eval("(function (name) { return this[name].constructor.name; })") typeof = ctxt.eval("(function (name) { return typeof(this[name]); })") self.assertEqual('Boolean', typename('var_bool')) 
self.assertEqual('Number', typename('var_int')) self.assertEqual('Number', typename('var_float')) self.assertEqual('String', typename('var_str')) self.assertEqual('String', typename('var_unicode')) self.assertEqual('Date', typename('var_datetime')) self.assertEqual('Date', typename('var_date')) self.assertEqual('Date', typename('var_time')) self.assertEqual('MyInteger', typename('var_myint')) self.assertEqual('MyString', typename('var_mystr')) self.assertEqual('MyUnicode', typename('var_myunicode')) self.assertEqual('MyDateTime', typename('var_mytime')) self.assertEqual('object', typeof('var_myint')) self.assertEqual('object', typeof('var_mystr')) self.assertEqual('object', typeof('var_myunicode')) self.assertEqual('object', typeof('var_mytime')) def testJavascriptWrapper(self): with JSContext() as ctxt: self.assertEqual(type(None), type(ctxt.eval("null"))) self.assertEqual(type(None), type(ctxt.eval("undefined"))) self.assertEqual(bool, type(ctxt.eval("true"))) self.assertEqual(str, type(ctxt.eval("'test'"))) self.assertEqual(int, type(ctxt.eval("123"))) self.assertEqual(float, type(ctxt.eval("3.14"))) self.assertEqual(datetime, type(ctxt.eval("new Date()"))) self.assertEqual(JSArray, type(ctxt.eval("[1, 2, 3]"))) self.assertEqual(JSFunction, type(ctxt.eval("(function() {})"))) self.assertEqual(JSObject, type(ctxt.eval("new Object()"))) def testPythonWrapper(self): with JSContext() as ctxt: typeof = ctxt.eval("(function type(value) { return typeof value; })") protoof = ctxt.eval("(function protoof(value) { return Object.prototype.toString.apply(value); })") self.assertEqual('[object Null]', protoof(None)) self.assertEqual('boolean', typeof(True)) self.assertEqual('number', typeof(123)) self.assertEqual('number', typeof(3.14)) self.assertEqual('string', typeof('test')) self.assertEqual('string', typeof(u'test')) self.assertEqual('[object Date]', protoof(datetime.now())) self.assertEqual('[object Date]', protoof(date.today())) self.assertEqual('[object Date]', 
protoof(time())) def test(): pass self.assertEqual('[object Function]', protoof(abs)) self.assertEqual('[object Function]', protoof(test)) self.assertEqual('[object Function]', protoof(self.testPythonWrapper)) self.assertEqual('[object Function]', protoof(int)) def testFunction(self): with JSContext() as ctxt: func = ctxt.eval(""" (function () { function a() { return "abc"; } return a(); }) """) self.assertEqual("abc", str(func())) self.assertTrue(func != None) self.assertFalse(func == None) func = ctxt.eval("(function test() {})") self.assertEqual("test", func.name) self.assertEqual("", func.resname) self.assertEqual(0, func.linenum) self.assertEqual(14, func.colnum) self.assertEqual(0, func.lineoff) self.assertEqual(0, func.coloff) #TODO fix me, why the setter doesn't work? # func.name = "hello" # it seems __setattr__ was called instead of CJavascriptFunction::SetName func.setName("hello") self.assertEqual("hello", func.name) def testCall(self): class Hello(object): def __call__(self, name): return "hello " + name class Global(JSClass): hello = Hello() with JSContext(Global()) as ctxt: self.assertEqual("hello flier", ctxt.eval("hello('flier')")) def testJSFunction(self): with JSContext() as ctxt: hello = ctxt.eval("(function (name) { return 'hello ' + name; })") self.assertTrue(isinstance(hello, _PyV8.JSFunction)) self.assertEqual("hello flier", hello('flier')) self.assertEqual("hello flier", hello.invoke(['flier'])) obj = ctxt.eval("({ 'name': 'flier', 'hello': function (name) { return 'hello ' + name + ' from ' + this.name; }})") hello = obj.hello self.assertTrue(isinstance(hello, JSFunction)) self.assertEqual("hello flier from flier", hello('flier')) tester = ctxt.eval("({ 'name': 'tester' })") self.assertEqual("hello flier from tester", hello.invoke(tester, ['flier'])) self.assertEqual("hello flier from json", hello.apply({ 'name': 'json' }, ['flier'])) def testConstructor(self): with JSContext() as ctx: ctx.eval(""" var Test = function() { 
this.trySomething(); }; Test.prototype.trySomething = function() { this.name = 'flier'; }; var Test2 = function(first_name, last_name) { this.name = first_name + ' ' + last_name; }; """) self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSFunction)) test = JSObject.create(ctx.locals.Test) self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSObject)) self.assertEqual("flier", test.name); test2 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu')) self.assertEqual("Flier Lu", test2.name); test3 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'), { 'email': 'flier.lu@gmail.com' }) self.assertEqual("flier.lu@gmail.com", test3.email); def testJSError(self): with JSContext() as ctxt: try: ctxt.eval('throw "test"') self.fail() except: self.assertTrue(JSError, sys.exc_info()[0]) def testErrorInfo(self): with JSContext() as ctxt: with JSEngine() as engine: try: engine.compile(""" function hello() { throw Error("hello world"); } hello();""", "test", 10, 10).run() self.fail() except JSError as e: self.assertTrue(str(e).startswith('JSError: Error: hello world ( test @ 14 : 34 ) ->')) self.assertEqual("Error", e.name) self.assertEqual("hello world", e.message) self.assertEqual("test", e.scriptName) self.assertEqual(14, e.lineNum) self.assertEqual(102, e.startPos) self.assertEqual(103, e.endPos) self.assertEqual(34, e.startCol) self.assertEqual(35, e.endCol) self.assertEqual('throw Error("hello world");', e.sourceLine.strip()) self.assertEqual('Error: hello world\n' + ' at Error (<anonymous>)\n' + ' at hello (test:14:35)\n' + ' at test:17:25', e.stackTrace) def testParseStack(self): self.assertEqual([ ('Error', 'unknown source', None, None), ('test', 'native', None, None), ('<anonymous>', 'test0', 3, 5), ('f', 'test1', 2, 19), ('g', 'test2', 1, 15), (None, 'test3', 1, None), (None, 'test3', 1, 1), ], JSError.parse_stack("""Error: err at Error (unknown source) at test (native) at new <anonymous> (test0:3:5) at f (test1:2:19) at g (test2:1:15) at test3:1 at test3:1:1""")) def 
testStackTrace(self): class Global(JSClass): def GetCurrentStackTrace(self, limit): return JSStackTrace.GetCurrentStackTrace(4, JSStackTrace.Options.Detailed) with JSContext(Global()) as ctxt: st = ctxt.eval(""" function a() { return GetCurrentStackTrace(10); } function b() { return eval("a()"); } function c() { return new b(); } c();""", "test") self.assertEqual(4, len(st)) self.assertEqual("\tat a (test:4:28)\n\tat (eval)\n\tat b (test:8:28)\n\tat c (test:12:28)\n", str(st)) self.assertEqual("test.a (4:28)\n. (1:1) eval\ntest.b (8:28) constructor\ntest.c (12:28)", "\n".join(["%s.%s (%d:%d)%s%s" % ( f.scriptName, f.funcName, f.lineNum, f.column, ' eval' if f.isEval else '', ' constructor' if f.isConstructor else '') for f in st])) def testPythonException(self): class Global(JSClass): def raiseException(self): raise RuntimeError("Hello") with JSContext(Global()) as ctxt: r = ctxt.eval(""" msg =""; try { this.raiseException() } catch(e) { msg += "catch " + e + ";"; } finally { msg += "finally"; }""") self.assertEqual("catch Error: Hello;finally", str(ctxt.locals.msg)) def testExceptionMapping(self): class TestException(Exception): pass class Global(JSClass): def raiseIndexError(self): return [1, 2, 3][5] def raiseAttributeError(self): None.hello() def raiseSyntaxError(self): eval("???") def raiseTypeError(self): int(sys) def raiseNotImplementedError(self): raise NotImplementedError("Not support") def raiseExceptions(self): raise TestException() with JSContext(Global()) as ctxt: ctxt.eval("try { this.raiseIndexError(); } catch (e) { msg = e; }") self.assertEqual("RangeError: list index out of range", str(ctxt.locals.msg)) ctxt.eval("try { this.raiseAttributeError(); } catch (e) { msg = e; }") self.assertEqual("ReferenceError: 'NoneType' object has no attribute 'hello'", str(ctxt.locals.msg)) ctxt.eval("try { this.raiseSyntaxError(); } catch (e) { msg = e; }") self.assertEqual("SyntaxError: invalid syntax", str(ctxt.locals.msg)) ctxt.eval("try { this.raiseTypeError(); 
} catch (e) { msg = e; }") self.assertEqual("TypeError: int() argument must be a string or a number, not 'module'", str(ctxt.locals.msg)) ctxt.eval("try { this.raiseNotImplementedError(); } catch (e) { msg = e; }") self.assertEqual("Error: Not support", str(ctxt.locals.msg)) self.assertRaises(TestException, ctxt.eval, "this.raiseExceptions();") def testArray(self): with JSContext() as ctxt: array = ctxt.eval(""" var array = new Array(); for (i=0; i<10; i++) { array[i] = 10-i; } array; """) self.assertTrue(isinstance(array, _PyV8.JSArray)) self.assertEqual(10, len(array)) self.assertTrue(5 in array) self.assertFalse(15 in array) self.assertEqual(10, len(array)) for i in range(10): self.assertEqual(10-i, array[i]) array[5] = 0 self.assertEqual(0, array[5]) del array[5] self.assertEqual(None, array[5]) # array [10, 9, 8, 7, 6, None, 4, 3, 2, 1] # array[4:7] 4^^^^^^^^^7 # array[-3:-1] -3^^^^^^-1 # array[0:0] [] self.assertEqual([6, None, 4], array[4:7]) self.assertEqual([3, 2], array[-3:-1]) self.assertEqual([], array[0:0]) array[1:3] = [9, 9, 9] self.assertEqual([10, 9, 9, 9, 7, 6, None, 4, 3, 2, 1], list(array)) array[5:8] = [8, 8] self.assertEqual([10, 9, 9, 9, 7, 8, 8, 3, 2, 1], list(array)) del array[1:4] self.assertEqual([10, 7, 8, 8, 3, 2, 1], list(array)) ctxt.locals.array1 = JSArray(5) ctxt.locals.array2 = JSArray([1, 2, 3, 4, 5]) for i in range(len(ctxt.locals.array2)): ctxt.locals.array1[i] = ctxt.locals.array2[i] * 10 ctxt.eval(""" var sum = 0; for (i=0; i<array1.length; i++) sum += array1[i] for (i=0; i<array2.length; i++) sum += array2[i] """) self.assertEqual(165, ctxt.locals.sum) ctxt.locals.array3 = [1, 2, 3, 4, 5] self.assertTrue(ctxt.eval('array3[1] === 2')) self.assertTrue(ctxt.eval('array3[9] === undefined')) args = [ ["a = Array(7); for(i=0; i<a.length; i++) a[i] = i; a[3] = undefined; a[a.length-1]; a", "0,1,2,,4,5,6", [0, 1, 2, None, 4, 5, 6]], ["a = Array(7); for(i=0; i<a.length - 1; i++) a[i] = i; a[a.length-1]; a", "0,1,2,3,4,5,", [0, 1, 2, 
3, 4, 5, None]], ["a = Array(7); for(i=1; i<a.length; i++) a[i] = i; a[a.length-1]; a", ",1,2,3,4,5,6", [None, 1, 2, 3, 4, 5, 6]] ] for arg in args: array = ctxt.eval(arg[0]) self.assertEqual(arg[1], str(array)) self.assertEqual(arg[2], [array[i] for i in range(len(array))]) self.assertEqual(3, ctxt.eval("(function (arr) { return arr.length; })")(JSArray([1, 2, 3]))) self.assertEqual(2, ctxt.eval("(function (arr, idx) { return arr[idx]; })")(JSArray([1, 2, 3]), 1)) self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray([1, 2, 3]))) self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray((1, 2, 3)))) self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray(range(3)))) [x for x in JSArray([1,2,3])] def testMultiDimArray(self): with JSContext() as ctxt: ret = ctxt.eval(""" ({ 'test': function(){ return [ [ 1, 'abla' ], [ 2, 'ajkss' ], ] } }) """).test() self.assertEqual([[1, 'abla'], [2, 'ajkss']], convert(ret)) def testLazyConstructor(self): class Globals(JSClass): def __init__(self): self.array=JSArray([1,2,3]) with JSContext(Globals()) as ctxt: self.assertEqual(2, ctxt.eval("""array[1]""")) def testForEach(self): class NamedClass(object): foo = 1 def __init__(self): self.bar = 2 @property def foobar(self): return self.foo + self.bar def gen(x): for i in range(x): yield i with JSContext() as ctxt: func = ctxt.eval("""(function (k) { var result = []; for (var prop in k) { result.push(prop); } return result; })""") self.assertTrue(set(["bar", "foo", "foobar"]).issubset(set(func(NamedClass())))) self.assertEqual(["0", "1", "2"], list(func([1, 2, 3]))) self.assertEqual(["0", "1", "2"], list(func((1, 2, 3)))) self.assertEqual(["1", "2", "3"], list(func({1:1, 2:2, 3:3}))) self.assertEqual(["0", "1", "2"], list(func(gen(3)))) def testDict(self): with JSContext() as ctxt: obj = 
ctxt.eval("var r = { 'a' : 1, 'b' : 2 }; r") self.assertEqual(1, obj.a) self.assertEqual(2, obj.b) self.assertEqual({ 'a' : 1, 'b' : 2 }, dict(obj)) self.assertEqual({ 'a': 1, 'b': [1, 2, 3], 'c': { 'str' : 'goofy', 'float' : 1.234, 'obj' : { 'name': 'john doe' }}, 'd': True, 'e': None }, convert(ctxt.eval("""var x = { a: 1, b: [1, 2, 3], c: { str: 'goofy', float: 1.234, obj: { name: 'john doe' }}, d: true, e: null }; x"""))) def testDate(self): with JSContext() as ctxt: now1 = ctxt.eval("new Date();") self.assertTrue(now1) now2 = datetime.utcnow() delta = now2 - now1 if now2 > now1 else now1 - now2 self.assertTrue(delta < timedelta(seconds=1)) func = ctxt.eval("(function (d) { return d.toString(); })") now = datetime.now() self.assertTrue(str(func(now)).startswith(now.strftime("%a %b %d %Y %H:%M:%S"))) def testUnicode(self): with JSContext() as ctxt: self.assertEqual(u"人", toUnicodeString(ctxt.eval(u"\"人\""))) self.assertEqual(u"é", toUnicodeString(ctxt.eval(u"\"é\""))) func = ctxt.eval("(function (msg) { return msg.length; })") self.assertEqual(2, func(u"测试")) def testClassicStyleObject(self): class FileSystemWarpper: @property def cwd(self): return os.getcwd() class Global: @property def fs(self): return FileSystemWarpper() with JSContext(Global()) as ctxt: self.assertEqual(os.getcwd(), ctxt.eval("fs.cwd")) def testRefCount(self): count = sys.getrefcount(None) class Global(JSClass): pass with JSContext(Global()) as ctxt: ctxt.eval(""" var none = null; """) self.assertEqual(count+1, sys.getrefcount(None)) ctxt.eval(""" var none = null; """) self.assertEqual(count+1, sys.getrefcount(None)) def testProperty(self): class Global(JSClass): def __init__(self, name): self._name = name def getname(self): return self._name def setname(self, name): self._name = name def delname(self): self._name = 'deleted' name = property(getname, setname, delname) g = Global('world') with JSContext(g) as ctxt: self.assertEqual('world', ctxt.eval("name")) self.assertEqual('flier', 
ctxt.eval("this.name = 'flier';")) self.assertEqual('flier', ctxt.eval("name")) self.assertTrue(ctxt.eval("delete name")) ### # FIXME replace the global object with Python object # #self.assertEqual('deleted', ctxt.eval("name")) #ctxt.eval("__defineGetter__('name', function() { return 'fixed'; });") #self.assertEqual('fixed', ctxt.eval("name")) def testGetterAndSetter(self): class Global(JSClass): def __init__(self, testval): self.testval = testval with JSContext(Global("Test Value A")) as ctxt: self.assertEqual("Test Value A", ctxt.locals.testval) ctxt.eval(""" this.__defineGetter__("test", function() { return this.testval; }); this.__defineSetter__("test", function(val) { this.testval = val; }); """) self.assertEqual("Test Value A", ctxt.locals.test) ctxt.eval("test = 'Test Value B';") self.assertEqual("Test Value B", ctxt.locals.test) def testDestructor(self): import gc owner = self owner.deleted = False class Hello(object): def say(self): pass def __del__(self): owner.deleted = True def test(): with JSContext() as ctxt: fn = ctxt.eval("(function (obj) { obj.say(); })") obj = Hello() self.assertEqual(2, sys.getrefcount(obj)) fn(obj) self.assertEqual(4, sys.getrefcount(obj)) del obj test() self.assertFalse(owner.deleted) JSEngine.collect() gc.collect() self.assertTrue(owner.deleted) def testNullInString(self): with JSContext() as ctxt: fn = ctxt.eval("(function (s) { return s; })") self.assertEqual("hello \0 world", fn("hello \0 world")) def testLivingObjectCache(self): class Global(JSClass): i = 1 b = True o = object() with JSContext(Global()) as ctxt: self.assertTrue(ctxt.eval("i == i")) self.assertTrue(ctxt.eval("b == b")) self.assertTrue(ctxt.eval("o == o")) def testNamedSetter(self): class Obj(JSClass): @property def p(self): return self._p @p.setter def p(self, value): self._p = value class Global(JSClass): def __init__(self): self.obj = Obj() self.d = {} self.p = None with JSContext(Global()) as ctxt: ctxt.eval(""" x = obj; x.y = 10; x.p = 10; d.y = 10; 
""") self.assertEqual(10, ctxt.eval("obj.y")) self.assertEqual(10, ctxt.eval("obj.p")) self.assertEqual(10, ctxt.locals.d['y']) def testWatch(self): class Obj(JSClass): def __init__(self): self.p = 1 class Global(JSClass): def __init__(self): self.o = Obj() with JSContext(Global()) as ctxt: ctxt.eval(""" o.watch("p", function (id, oldval, newval) { return oldval + newval; }); """) self.assertEqual(1, ctxt.eval("o.p")) ctxt.eval("o.p = 2;") self.assertEqual(3, ctxt.eval("o.p")) ctxt.eval("delete o.p;") self.assertEqual(None, ctxt.eval("o.p")) ctxt.eval("o.p = 2;") self.assertEqual(2, ctxt.eval("o.p")) ctxt.eval("o.unwatch('p');") ctxt.eval("o.p = 1;") self.assertEqual(1, ctxt.eval("o.p")) def testReferenceError(self): class Global(JSClass): def __init__(self): self.s = self with JSContext(Global()) as ctxt: self.assertRaises(ReferenceError, ctxt.eval, 'x') self.assertTrue(ctxt.eval("typeof(x) === 'undefined'")) self.assertTrue(ctxt.eval("typeof(String) === 'function'")) self.assertTrue(ctxt.eval("typeof(s.String) === 'undefined'")) self.assertTrue(ctxt.eval("typeof(s.z) === 'undefined'")) def testRaiseExceptionInGetter(self): class Document(JSClass): def __getattr__(self, name): if name == 'y': raise TypeError() return JSClass.__getattr__(self, name) class Global(JSClass): def __init__(self): self.document = Document() with JSContext(Global()) as ctxt: self.assertEqual(None, ctxt.eval('document.x')) self.assertRaises(TypeError, ctxt.eval, 'document.y') class TestMultithread(unittest.TestCase): def testLocker(self): self.assertFalse(JSLocker.active) self.assertFalse(JSLocker.locked) with JSLocker() as outter_locker: self.assertTrue(JSLocker.active) self.assertTrue(JSLocker.locked) self.assertTrue(outter_locker) with JSLocker() as inner_locker: self.assertTrue(JSLocker.locked) self.assertTrue(outter_locker) self.assertTrue(inner_locker) with JSUnlocker() as unlocker: self.assertFalse(JSLocker.locked) self.assertTrue(outter_locker) self.assertTrue(inner_locker) 
self.assertTrue(JSLocker.locked) self.assertTrue(JSLocker.active) self.assertFalse(JSLocker.locked) locker = JSLocker() with JSContext(): self.assertRaises(RuntimeError, locker.__enter__) self.assertRaises(RuntimeError, locker.__exit__, None, None, None) del locker def testMultiPythonThread(self): import time, threading class Global: count = 0 started = threading.Event() finished = threading.Semaphore(0) def sleep(self, ms): time.sleep(ms / 1000.0) self.count += 1 g = Global() def run(): with JSContext(g) as ctxt: ctxt.eval(""" started.wait(); for (i=0; i<10; i++) { sleep(100); } finished.release(); """) threading.Thread(target=run).start() now = time.time() self.assertEqual(0, g.count) g.started.set() g.finished.acquire() self.assertEqual(10, g.count) self.assertTrue((time.time() - now) >= 1) def testMultiJavascriptThread(self): import time, threading class Global: result = [] def add(self, value): with JSUnlocker(): time.sleep(0.1) self.result.append(value) g = Global() def run(): with JSContext(g) as ctxt: ctxt.eval(""" for (i=0; i<10; i++) add(i); """) threads = [threading.Thread(target=run), threading.Thread(target=run)] with JSLocker(): for t in threads: t.start() for t in threads: t.join() self.assertEqual(20, len(g.result)) def _testPreemptionJavascriptThreads(self): import time, threading class Global: result = [] def add(self, value): # we use preemption scheduler to switch between threads # so, just comment the JSUnlocker # # with JSUnlocker() as unlocker: time.sleep(0.1) self.result.append(value) g = Global() def run(): with JSContext(g) as ctxt: ctxt.eval(""" for (i=0; i<10; i++) add(i); """) threads = [threading.Thread(target=run), threading.Thread(target=run)] with JSLocker() as locker: JSLocker.startPreemption(100) for t in threads: t.start() for t in threads: t.join() self.assertEqual(20, len(g.result)) class TestEngine(unittest.TestCase): def testClassProperties(self): with JSContext() as ctxt: 
self.assertTrue(str(JSEngine.version).startswith("3.")) self.assertFalse(JSEngine.dead) def testCompile(self): with JSContext() as ctxt: with JSEngine() as engine: s = engine.compile("1+2") self.assertTrue(isinstance(s, _PyV8.JSScript)) self.assertEqual("1+2", s.source) self.assertEqual(3, int(s.run())) self.assertRaises(SyntaxError, engine.compile, "1+") def testPrecompile(self): with JSContext() as ctxt: with JSEngine() as engine: data = engine.precompile("1+2") self.assertTrue(data) self.assertEqual(28, len(data)) s = engine.compile("1+2", precompiled=data) self.assertTrue(isinstance(s, _PyV8.JSScript)) self.assertEqual("1+2", s.source) self.assertEqual(3, int(s.run())) self.assertRaises(SyntaxError, engine.precompile, "1+") def testUnicodeSource(self): class Global(JSClass): var = u'测试' def __getattr__(self, name): if (name if is_py3k else name.decode('utf-8')) == u'变量': return self.var return JSClass.__getattr__(self, name) g = Global() with JSContext(g) as ctxt: with JSEngine() as engine: src = u""" function 函数() { return 变量.length; } 函数(); var func = function () {}; """ data = engine.precompile(src) self.assertTrue(data) self.assertEqual(68, len(data)) s = engine.compile(src, precompiled=data) self.assertTrue(isinstance(s, _PyV8.JSScript)) self.assertEqual(toNativeString(src), s.source) self.assertEqual(2, s.run()) func_name = toNativeString(u'函数') self.assertTrue(hasattr(ctxt.locals, func_name)) func = getattr(ctxt.locals, func_name) self.assertTrue(isinstance(func, _PyV8.JSFunction)) self.assertEqual(func_name, func.name) self.assertEqual("", func.resname) self.assertEqual(1, func.linenum) self.assertEqual(0, func.lineoff) self.assertEqual(0, func.coloff) var_name = toNativeString(u'变量') setattr(ctxt.locals, var_name, u'测试长字符串') self.assertEqual(6, func()) self.assertEqual("func", ctxt.locals.func.inferredname) def testExtension(self): extSrc = """function hello(name) { return "hello " + name + " from javascript"; }""" extJs = 
JSExtension("hello/javascript", extSrc) self.assertTrue(extJs) self.assertEqual("hello/javascript", extJs.name) self.assertEqual(extSrc, extJs.source) self.assertFalse(extJs.autoEnable) self.assertTrue(extJs.registered) TestEngine.extJs = extJs with JSContext(extensions=['hello/javascript']) as ctxt: self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')")) # test the auto enable property with JSContext() as ctxt: self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')") extJs.autoEnable = True self.assertTrue(extJs.autoEnable) with JSContext() as ctxt: self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')")) extJs.autoEnable = False self.assertFalse(extJs.autoEnable) with JSContext() as ctxt: self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')") extUnicodeSrc = u"""function helloW(name) { return "hello " + name + " from javascript"; }""" extUnicodeJs = JSExtension(u"helloW/javascript", extUnicodeSrc) self.assertTrue(extUnicodeJs) self.assertEqual("helloW/javascript", extUnicodeJs.name) self.assertEqual(toNativeString(extUnicodeSrc), extUnicodeJs.source) self.assertFalse(extUnicodeJs.autoEnable) self.assertTrue(extUnicodeJs.registered) TestEngine.extUnicodeJs = extUnicodeJs with JSContext(extensions=['helloW/javascript']) as ctxt: self.assertEqual("hello flier from javascript", ctxt.eval("helloW('flier')")) ret = ctxt.eval(u"helloW('世界')") self.assertEqual(u"hello 世界 from javascript", ret if is_py3k else ret.decode('UTF-8')) def testNativeExtension(self): extSrc = "native function hello();" extPy = JSExtension("hello/python", extSrc, lambda func: lambda name: "hello " + name + " from python", register=False) self.assertTrue(extPy) self.assertEqual("hello/python", extPy.name) self.assertEqual(extSrc, extPy.source) self.assertFalse(extPy.autoEnable) self.assertFalse(extPy.registered) extPy.register() self.assertTrue(extPy.registered) TestEngine.extPy = extPy with JSContext(extensions=['hello/python']) as ctxt: 
self.assertEqual("hello flier from python", ctxt.eval("hello('flier')")) def _testSerialize(self): data = None self.assertFalse(JSContext.entered) with JSContext() as ctxt: self.assertTrue(JSContext.entered) #ctxt.eval("function hello(name) { return 'hello ' + name; }") data = JSEngine.serialize() self.assertTrue(data) self.assertTrue(len(data) > 0) self.assertFalse(JSContext.entered) #JSEngine.deserialize() self.assertTrue(JSContext.entered) self.assertEqual('hello flier', JSContext.current.eval("hello('flier');")) def testEval(self): with JSContext() as ctxt: self.assertEqual(3, int(ctxt.eval("1+2"))) def testGlobal(self): class Global(JSClass): version = "1.0" with JSContext(Global()) as ctxt: vars = ctxt.locals # getter self.assertEqual(Global.version, str(vars.version)) self.assertEqual(Global.version, str(ctxt.eval("version"))) self.assertRaises(ReferenceError, ctxt.eval, "nonexists") # setter self.assertEqual(2.0, float(ctxt.eval("version = 2.0"))) self.assertEqual(2.0, float(vars.version)) def testThis(self): class Global(JSClass): version = 1.0 with JSContext(Global()) as ctxt: self.assertEqual("[object Global]", str(ctxt.eval("this"))) self.assertEqual(1.0, float(ctxt.eval("this.version"))) def testObjectBuildInMethods(self): class Global(JSClass): version = 1.0 with JSContext(Global()) as ctxt: self.assertEqual("[object Global]", str(ctxt.eval("this.toString()"))) self.assertEqual("[object Global]", str(ctxt.eval("this.toLocaleString()"))) self.assertEqual(Global.version, float(ctxt.eval("this.valueOf()").version)) self.assertTrue(bool(ctxt.eval("this.hasOwnProperty(\"version\")"))) self.assertFalse(ctxt.eval("this.hasOwnProperty(\"nonexistent\")")) def testPythonWrapper(self): class Global(JSClass): s = [1, 2, 3] d = {'a': {'b': 'c'}, 'd': ['e', 'f']} g = Global() with JSContext(g) as ctxt: ctxt.eval(""" s[2] = s[1] + 2; s[0] = s[1]; delete s[1]; """) self.assertEqual([2, 4], g.s) self.assertEqual('c', ctxt.eval("d.a.b")) self.assertEqual(['e', 'f'], 
ctxt.eval("d.d")) ctxt.eval(""" d.a.q = 4 delete d.d """) self.assertEqual(4, g.d['a']['q']) self.assertEqual(None, ctxt.eval("d.d")) def _testMemoryAllocationCallback(self): alloc = {} def callback(space, action, size): alloc[(space, action)] = alloc.setdefault((space, action), 0) + size JSEngine.setMemoryAllocationCallback(callback) with JSContext() as ctxt: self.assertFalse((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc) ctxt.eval("var o = new Array(1000);") self.assertTrue((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc) JSEngine.setMemoryAllocationCallback(None) class TestDebug(unittest.TestCase): def setUp(self): self.engine = JSEngine() def tearDown(self): del self.engine events = [] def processDebugEvent(self, event): try: logging.debug("receive debug event: %s", repr(event)) self.events.append(repr(event)) except: logging.error("fail to process debug event") logging.debug(traceback.extract_stack()) def testEventDispatch(self): debugger = JSDebugger() self.assertTrue(not debugger.enabled) debugger.onBreak = lambda evt: self.processDebugEvent(evt) debugger.onException = lambda evt: self.processDebugEvent(evt) debugger.onNewFunction = lambda evt: self.processDebugEvent(evt) debugger.onBeforeCompile = lambda evt: self.processDebugEvent(evt) debugger.onAfterCompile = lambda evt: self.processDebugEvent(evt) with JSContext() as ctxt: debugger.enabled = True self.assertEqual(3, int(ctxt.eval("function test() { text = \"1+2\"; return eval(text) } test()"))) debugger.enabled = False self.assertRaises(JSError, JSContext.eval, ctxt, "throw 1") self.assertTrue(not debugger.enabled) self.assertEqual(4, len(self.events)) class TestProfile(unittest.TestCase): def _testStart(self): self.assertFalse(profiler.started) profiler.start() self.assertTrue(profiler.started) profiler.stop() self.assertFalse(profiler.started) def _testResume(self): self.assertTrue(profiler.paused) self.assertEqual(profiler.Modules.cpu, profiler.modules) profiler.resume() 
profiler.resume(profiler.Modules.heap) # TODO enable profiler with resume #self.assertFalse(profiler.paused) class TestAST(unittest.TestCase): class Checker(object): def __init__(self, testcase): self.testcase = testcase self.called = [] def __enter__(self): self.ctxt = JSContext() self.ctxt.enter() return self def __exit__(self, exc_type, exc_value, traceback): self.ctxt.leave() def __getattr__(self, name): return getattr(self.testcase, name) def test(self, script): JSEngine().compile(script).visit(self) return self.called def onProgram(self, prog): self.ast = prog.toAST() self.json = json.loads(prog.toJSON()) for decl in prog.scope.declarations: decl.visit(self) for stmt in prog.body: stmt.visit(self) def onBlock(self, block): for stmt in block.statements: stmt.visit(self) def onExpressionStatement(self, stmt): stmt.expression.visit(self) #print type(stmt.expression), stmt.expression def testBlock(self): class BlockChecker(TestAST.Checker): def onBlock(self, stmt): self.called.append('block') self.assertEqual(AST.NodeType.Block, stmt.type) self.assertTrue(stmt.initializerBlock) self.assertFalse(stmt.anonymous) target = stmt.breakTarget self.assertTrue(target) self.assertFalse(target.bound) self.assertTrue(target.unused) self.assertFalse(target.linked) self.assertEqual(2, len(stmt.statements)) self.assertEqual(['%InitializeVarGlobal("i", 0);', '%InitializeVarGlobal("j", 0);'], [str(s) for s in stmt.statements]) with BlockChecker(self) as checker: self.assertEqual(['block'], checker.test("var i, j;")) self.assertEqual("""FUNC . NAME "" . INFERRED NAME "" . DECLS . . VAR "i" . . VAR "j" . BLOCK INIT . . CALL RUNTIME InitializeVarGlobal . . . LITERAL "i" . . . LITERAL 0 . . CALL RUNTIME InitializeVarGlobal . . . LITERAL "j" . . . 
LITERAL 0 """, checker.ast) self.assertEqual([u'FunctionLiteral', {u'name': u''}, [u'Declaration', {u'mode': u'VAR'}, [u'Variable', {u'name': u'i'}] ], [u'Declaration', {u'mode':u'VAR'}, [u'Variable', {u'name': u'j'}] ], [u'Block', [u'ExpressionStatement', [u'CallRuntime', {u'name': u'InitializeVarGlobal'}, [u'Literal', {u'handle':u'i'}], [u'Literal', {u'handle': 0}]]], [u'ExpressionStatement', [u'CallRuntime', {u'name': u'InitializeVarGlobal'}, [u'Literal', {u'handle': u'j'}], [u'Literal', {u'handle': 0}]]] ] ], checker.json) def testIfStatement(self): class IfStatementChecker(TestAST.Checker): def onIfStatement(self, stmt): self.called.append('if') self.assertTrue(stmt) self.assertEqual(AST.NodeType.IfStatement, stmt.type) self.assertEqual(7, stmt.pos) stmt.pos = 100 self.assertEqual(100, stmt.pos) self.assertTrue(stmt.hasThenStatement) self.assertTrue(stmt.hasElseStatement) self.assertEqual("((value % 2) == 0)", str(stmt.condition)) self.assertEqual("{ s = \"even\"; }", str(stmt.thenStatement)) self.assertEqual("{ s = \"odd\"; }", str(stmt.elseStatement)) self.assertFalse(stmt.condition.isPropertyName) with IfStatementChecker(self) as checker: self.assertEqual(['if'], checker.test("var s; if (value % 2 == 0) { s = 'even'; } else { s = 'odd'; }")) def testForStatement(self): class ForStatementChecker(TestAST.Checker): def onForStatement(self, stmt): self.called.append('for') self.assertEqual("{ j += i; }", str(stmt.body)) self.assertEqual("i = 0;", str(stmt.init)) self.assertEqual("(i < 10)", str(stmt.condition)) self.assertEqual("(i++);", str(stmt.nextStmt)) target = stmt.continueTarget self.assertTrue(target) self.assertFalse(target.bound) self.assertTrue(target.unused) self.assertFalse(target.linked) self.assertFalse(stmt.fastLoop) def onForInStatement(self, stmt): self.called.append('forIn') self.assertEqual("{ out += name; }", str(stmt.body)) self.assertEqual("name", str(stmt.each)) self.assertEqual("names", str(stmt.enumerable)) def onWhileStatement(self, 
stmt): self.called.append('while') self.assertEqual("{ i += 1; }", str(stmt.body)) self.assertEqual("(i < 10)", str(stmt.condition)) def onDoWhileStatement(self, stmt): self.called.append('doWhile') self.assertEqual("{ i += 1; }", str(stmt.body)) self.assertEqual("(i < 10)", str(stmt.condition)) self.assertEqual(281, stmt.conditionPos) with ForStatementChecker(self) as checker: self.assertEqual(['for', 'forIn', 'while', 'doWhile'], checker.test(""" var i, j; for (i=0; i<10; i++) { j+=i; } var names = new Array(); var out = ''; for (name in names) { out += name; } while (i<10) { i += 1; } do { i += 1; } while (i<10); """)) def testCallStatements(self): class CallStatementChecker(TestAST.Checker): def onVariableDeclaration(self, decl): self.called.append('var') var = decl.proxy if var.name == 's': self.assertEqual(AST.VarMode.var, decl.mode) self.assertTrue(var.isValidLeftHandSide) self.assertFalse(var.isArguments) self.assertFalse(var.isThis) def onFunctionDeclaration(self, decl): self.called.append('func') var = decl.proxy if var.name == 'hello': self.assertEqual(AST.VarMode.var, decl.mode) self.assertTrue(decl.function) self.assertEqual('(function hello(name) { s = ("Hello " + name); })', str(decl.function)) elif var.name == 'dog': self.assertEqual(AST.VarMode.var, decl.mode) self.assertTrue(decl.function) self.assertEqual('(function dog(name) { (this).name = name; })', str(decl.function)) def onCall(self, expr): self.called.append('call') self.assertEqual("hello", str(expr.expression)) self.assertEqual(['"flier"'], [str(arg) for arg in expr.args]) self.assertEqual(159, expr.pos) def onCallNew(self, expr): self.called.append('callNew') self.assertEqual("dog", str(expr.expression)) self.assertEqual(['"cat"'], [str(arg) for arg in expr.args]) self.assertEqual(191, expr.pos) def onCallRuntime(self, expr): self.called.append('callRuntime') self.assertEqual("InitializeVarGlobal", expr.name) self.assertEqual(['"s"', '0'], [str(arg) for arg in expr.args]) 
self.assertFalse(expr.isJsRuntime) with CallStatementChecker(self) as checker: self.assertEqual(['var', 'func', 'func', 'callRuntime', 'call', 'callNew'], checker.test(""" var s; function hello(name) { s = "Hello " + name; } function dog(name) { this.name = name; } hello("flier"); new dog("cat"); """)) def testTryStatements(self): class TryStatementsChecker(TestAST.Checker): def onThrow(self, expr): self.called.append('try') self.assertEqual('"abc"', str(expr.exception)) self.assertEqual(66, expr.pos) def onTryCatchStatement(self, stmt): self.called.append('catch') self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock)) #FIXME self.assertEqual([], stmt.targets) stmt.tryBlock.visit(self) self.assertEqual("err", str(stmt.variable.name)) self.assertEqual("{ s = err; }", str(stmt.catchBlock)) def onTryFinallyStatement(self, stmt): self.called.append('finally') self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock)) #FIXME self.assertEqual([], stmt.targets) self.assertEqual("{ s += \".\"; }", str(stmt.finallyBlock)) with TryStatementsChecker(self) as checker: self.assertEqual(['catch', 'try', 'finally'], checker.test(""" var s; try { throw "abc"; } catch (err) { s = err; }; try { throw "abc"; } finally { s += "."; } """)) def testLiterals(self): class LiteralChecker(TestAST.Checker): def onCallRuntime(self, expr): expr.args[1].visit(self) def onLiteral(self, litr): self.called.append('literal') self.assertFalse(litr.isPropertyName) self.assertFalse(litr.isNull) self.assertFalse(litr.isTrue) def onRegExpLiteral(self, litr): self.called.append('regex') self.assertEqual("test", litr.pattern) self.assertEqual("g", litr.flags) def onObjectLiteral(self, litr): self.called.append('object') self.assertEqual('constant:"name"="flier",constant:"sex"=true', ",".join(["%s:%s=%s" % (prop.kind, prop.key, prop.value) for prop in litr.properties])) def onArrayLiteral(self, litr): self.called.append('array') self.assertEqual('"hello","world",42', ",".join([str(value) for value in 
litr.values])) with LiteralChecker(self) as checker: self.assertEqual(['literal', 'regex', 'literal', 'literal'], checker.test(""" false; /test/g; var o = { name: 'flier', sex: true }; var a = ['hello', 'world', 42]; """)) def testOperations(self): class OperationChecker(TestAST.Checker): def onUnaryOperation(self, expr): self.called.append('unaryOp') self.assertEqual(AST.Op.BIT_NOT, expr.op) self.assertEqual("i", expr.expression.name) #print "unary", expr def onIncrementOperation(self, expr): self.fail() def onBinaryOperation(self, expr): self.called.append('binOp') self.assertEqual(AST.Op.ADD, expr.op) self.assertEqual("i", str(expr.left)) self.assertEqual("j", str(expr.right)) self.assertEqual(36, expr.pos) #print "bin", expr def onAssignment(self, expr): self.called.append('assign') self.assertEqual(AST.Op.ASSIGN_ADD, expr.op) self.assertEqual(AST.Op.ADD, expr.binop) self.assertEqual("i", str(expr.target)) self.assertEqual("1", str(expr.value)) self.assertEqual(53, expr.pos) self.assertEqual("(i + 1)", str(expr.binOperation)) self.assertTrue(expr.compound) def onCountOperation(self, expr): self.called.append('countOp') self.assertFalse(expr.prefix) self.assertTrue(expr.postfix) self.assertEqual(AST.Op.INC, expr.op) self.assertEqual(AST.Op.ADD, expr.binop) self.assertEqual(71, expr.pos) self.assertEqual("i", expr.expression.name) #print "count", expr def onCompareOperation(self, expr): self.called.append('compOp') if len(self.called) == 4: self.assertEqual(AST.Op.EQ, expr.op) self.assertEqual(88, expr.pos) # i==j else: self.assertEqual(AST.Op.EQ_STRICT, expr.op) self.assertEqual(106, expr.pos) # i===j self.assertEqual("i", str(expr.left)) self.assertEqual("j", str(expr.right)) #print "comp", expr def onConditional(self, expr): self.called.append('conditional') self.assertEqual("(i > j)", str(expr.condition)) self.assertEqual("i", str(expr.thenExpr)) self.assertEqual("j", str(expr.elseExpr)) self.assertEqual(144, expr.thenExprPos) self.assertEqual(146, 
expr.elseExprPos) with OperationChecker(self) as checker: self.assertEqual(['binOp', 'assign', 'countOp', 'compOp', 'compOp', 'unaryOp', 'conditional'], checker.test(""" var i, j; i+j; i+=1; i++; i==j; i===j; ~i; i>j?i:j; """)) def testSwitchStatement(self): class SwitchStatementChecker(TestAST.Checker): def onSwitchStatement(self, stmt): self.called.append('switch') self.assertEqual('expr', stmt.tag.name) self.assertEqual(2, len(stmt.cases)) case = stmt.cases[0] self.assertFalse(case.isDefault) self.assertTrue(case.label.isString) self.assertEqual(0, case.bodyTarget.pos) self.assertEqual(57, case.position) self.assertEqual(1, len(case.statements)) case = stmt.cases[1] self.assertTrue(case.isDefault) self.assertEqual(None, case.label) self.assertEqual(0, case.bodyTarget.pos) self.assertEqual(109, case.position) self.assertEqual(1, len(case.statements)) with SwitchStatementChecker(self) as checker: self.assertEqual(['switch'], checker.test(""" switch (expr) { case 'flier': break; default: break; } """)) if __name__ == '__main__': if "-v" in sys.argv: level = logging.DEBUG else: level = logging.WARN if "-p" in sys.argv: sys.argv.remove("-p") print("Press any key to continue or attach process #%d..." % os.getpid()) raw_input() logging.basicConfig(level=level, format='%(asctime)s %(levelname)s %(message)s') logging.info("testing PyV8 module %s with V8 v%s", __version__, JSEngine.version) unittest.main()
serial_actuator.py
"""Control the car using serial port.""" import logging import threading import time import serial from ..utils import memory, settings, utils class SerialActuator: """This classs send through serial port commands to an Arduino to pilot a motors and a servo motor using PWM.""" def __init__( self, memory=memory.mem, port=settings.settings.SERIAL_PORT, steering_key="steering", throttle_key="throttle", speed_key="speed", ): """Initialize the class. Args: memory (dict, optional): memory object. port (str, optional): serial port. Defaults to "/dev/ttyUSB0". example /dev/ttyS2 or /dev/ttyUSB0 steering_key (str, optional): key of the steering in the memory. Defaults to "steering". throttle_key (str, optional): key of the throttle in the memory. Defaults to "throttle". speed_key (str, optional): key of the speed in the memory. Defaults to "speed". """ if port == "loop://": # this one is for testing purposes self.ser = serial.serial_for_url("loop://") else: self.ser = serial.Serial() self.ser.port = port self.ser.baudrate = 115200 self.ser.bytesize = serial.EIGHTBITS # number of bits per bytes self.ser.parity = serial.PARITY_NONE # set parity check: no parity self.ser.stopbits = serial.STOPBITS_ONE # number of stop bits self.ser.timeout = 0 # 0 = no timeout self.__sensor_rpm = 0 # init rpm of the sensor to 0 self.__command = bytearray([255, 127, 127, 0]) self.__isRuning = True self.__isOperation = False self.__boosting = False self.__toSend = [] self.__memory = memory self.__steering_key = steering_key self.__throttle_key = throttle_key self.__speed_key = speed_key self.__ignore_next = False self.__steering = 127 self.__throttle = 127 self.__wheel_to_meters = 0.50 # 1 wheel turn = 0.20 m self.__gear_ratio = 7 # 7 motor turn = 1 wheel turn self.__last_received = time.time() try: if not self.ser.isOpen(): self.ser.open() self.start_thread() # check which port was really used logging.info(f"Serial port {self.ser.portstr} opened") except Exception as e: logging.error(f"Error 
opening port: {e}") def stop(self): self.__isRuning = False self.__thread.join() if self.ser.is_open: self.apply_steering_throttle(0, 0) self.ser.close() # close port def start_thread(self): self.__thread = threading.Thread(target=self.__run_threaded__) self.__thread.start() def __run_threaded__(self): while self.__isRuning: self.__read_rpm__() def __safe_write__(self, command): while self.__isOperation: pass self.__isOperation = True # print("writing", command) self.ser.write(command) self.__isOperation = False def __read_rpm__(self): if self.ser.in_waiting >= 1: while self.__isOperation: pass self.__isOperation = True try: out = self.ser.readlines()[-1] self.__decode_out__(out) finally: self.__isOperation = False def __decode_out__(self, out): if self.__ignore_next: self.__ignore_next = False else: # make sure that both end of lines are present if out != "" and out.endswith(b"\r\n"): res = int(out.decode()) if 134 > self.__throttle > 120 and res > 27000: self.__sensor_rpm = 0 else: self.__sensor_rpm = 30000000 / res # print(self.__throttle, res) self.__last_received = time.time() self.__memory[self.__speed_key] = ( self.__sensor_rpm / (self.__gear_ratio * 60) ) * self.__wheel_to_meters else: self.__ignore_next = True def apply_steering(self, steering): """Change steering. Args: steering (float): steering between -1 and 1. """ self.__steering = int(utils.map_value(steering, -1, 1, 0, 255)) self.__command[1] = self.__steering self.ser.write(self.__command) def apply_throttle(self, throttle): """Change motor throttle. Args: throttle (float): throttle between -1 and 1. """ self.__throttle = int(utils.map_value(throttle, -1, 1, 0, 255)) self.__command[2] = self.__throttle self.ser.write(self.__command) def apply_steering_throttle(self, steering, throttle): """Change all the elements at the same time. Args: steering (float): steering between -1 and 1. throttle (float): throttle between -1 and 1. 
""" self.__steering = int(utils.map_value(steering, -1, 1, 0, 255)) self.__throttle = int(utils.map_value(throttle, -1, 1, 0, 255)) self.__command[1] = self.__steering self.__command[2] = self.__throttle self.ser.write(self.__command) def update(self): """Update steering and throttle using memory.""" steering = self.__memory.get(self.__steering_key, 0) throttle = self.__memory.get(self.__throttle_key, 0) self.apply_steering_throttle(steering, throttle) def get_sensor_last_received(self): return self.__last_received
echo_server_integrated.py
import threading
import unittest
import sys
import os

# Make the in-repo ``websock`` package importable when the tests are run
# from the test directory (project layout: <proj>/websocket/websock).
proj_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
socket_folder = os.path.join(proj_folder, 'websocket')
sys.path.insert(0, socket_folder)
from websock import WebSocketServer

try:
    from websocket import create_connection
except ImportError:
    exit('websocket-client is required to run integrated tests: \n\n\t$ pip install websocket-client ')


class TestIntegrated(unittest.TestCase):
    """
    Integrated tests on the server are run using the websocket client from
    https://github.com/websocket-client/websocket-client/blob/master/examples/echoapp_client.py
    """

    def test_echo_server_single_client(self):
        """Round-trip one message through the server; expect it echoed back with '!' appended."""

        def on_data_receive(client, data):
            """Called by the WebSocketServer when data is received."""
            data += '!'
            server.send(client, data)

        def on_error(exception):
            """Called when the server returns an error."""
            raise exception

        server = WebSocketServer("127.0.0.1", 8467,
                                 on_data_receive=on_data_receive,
                                 on_error=on_error)
        # Daemon thread so a hung server cannot block interpreter shutdown.
        server_thread = threading.Thread(target=server.serve_once, args=(), daemon=True)
        server_thread.start()

        # NOTE(review): there is no handshake with the server thread here, so
        # this relies on the listening socket being bound before the client
        # connects — confirm WebSocketServer binds in its constructor.
        ws = create_connection("ws://localhost:8467")
        print('Connected')
        ws.send("Hello, World")
        result = ws.recv()
        ws.close()

        self.assertEqual(result, "Hello, World!")


if __name__ == "__main__":
    unittest.main()
gateway.py
# Copyright 2019 Novartis Institutes for BioMedical Research Inc. Licensed
# under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless
# required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.

import json
import logging

# import BaseHTTPServer
import os
from threading import Lock, Thread

from flask import (
    Flask,
    make_response,
    redirect,
    render_template,
    request,
    send_from_directory,
    url_for,
)
from flask_api import status
from werkzeug.utils import secure_filename

from cellxgene_gateway import env
from cellxgene_gateway.backend_cache import BackendCache
from cellxgene_gateway.cache_entry import CacheEntryStatus
from cellxgene_gateway.cellxgene_exception import CellxgeneException
from cellxgene_gateway.dir_util import create_dir, is_subdir
from cellxgene_gateway.extra_scripts import get_extra_scripts
from cellxgene_gateway.filecrawl import recurse_dir, render_entries
from cellxgene_gateway.path_util import get_key
from cellxgene_gateway.process_exception import ProcessException
from cellxgene_gateway.prune_process_cache import PruneProcessCache
from cellxgene_gateway.util import current_time_stamp

app = Flask(__name__)


def _force_https(app):
    """WSGI middleware that rewrites the URL scheme to env.external_protocol."""

    def wrapper(environ, start_response):
        environ["wsgi.url_scheme"] = env.external_protocol
        return app(environ, start_response)

    return wrapper


app.wsgi_app = _force_https(app.wsgi_app)
cache = BackendCache()
# External base URL used as a redirect target after mutating actions.
location = f"{env.external_protocol}://{env.external_host}"


@app.errorhandler(CellxgeneException)
def handle_invalid_usage(error):
    """Render a friendly error page for gateway-level (CellxgeneException) errors."""
    message = f"{error.http_status} Error : {error.message}"
    return (
        render_template(
            "cellxgene_error.html",
            extra_scripts=get_extra_scripts(),
            message=message,
        ),
        error.http_status,
    )


@app.errorhandler(ProcessException)
def handle_invalid_process(error):
    """Render an error page including stdout/stderr of a failed cellxgene process."""
    message = []
    message.append(error.message)
    message.append(f"{error.http_status} Error.")
    message.append(f"Stdout: {error.stdout}")
    message.append(f"Stderr: {error.stderr}")
    return (
        render_template(
            "process_error.html",
            extra_scripts=get_extra_scripts(),
            message=error.message,
            http_status=error.http_status,
            stdout=error.stdout,
            stderr=error.stderr,
            dataset=error.key.dataset,
            annotation_file=error.key.annotation_file,
        ),
        error.http_status,
    )


@app.route("/favicon.ico")
def favicon():
    """Serve the site icon."""
    return send_from_directory(
        os.path.join(app.root_path, "static"),
        "nibr.ico",
        # Fixed typo: was "image/vnd.microsof.icon".
        mimetype="image/vnd.microsoft.icon",
    )


@app.route("/")
def index():
    """Landing page listing the per-user data directories."""
    users = [
        name
        for name in os.listdir(env.cellxgene_data)
        if os.path.isdir(os.path.join(env.cellxgene_data, name))
    ]
    return render_template(
        "index.html",
        ip=env.ip,
        cellxgene_data=env.cellxgene_data,
        extra_scripts=get_extra_scripts(),
        users=users,
        enable_upload=env.enable_upload,
    )


def make_user():
    """Create a new top-level user directory (registered only when uploads are enabled)."""
    dir_name = request.form["directory"]
    create_dir(env.cellxgene_data, dir_name)
    return redirect(location, code=302)


def make_subdir():
    """Create a subdirectory under an existing user directory."""
    parent_path = os.path.join(env.cellxgene_data, request.form["usernames"])
    dir_name = request.form["directory"]
    create_dir(parent_path, dir_name)
    return redirect(location, code=302)


def upload_file():
    """Accept an .h5ad upload into a directory inside env.cellxgene_data.

    Raises CellxgeneException (400) when the target directory is outside the
    data root, no file was supplied, or the file is not an .h5ad.
    """
    upload_dir = request.form["path"]
    full_upload_path = os.path.join(env.cellxgene_data, upload_dir)
    # is_subdir guards against path traversal outside the data root.
    if is_subdir(full_upload_path, env.cellxgene_data) and os.path.isdir(
        full_upload_path
    ):
        if request.method == "POST":
            if "file" in request.files:
                f = request.files["file"]
                if f and f.filename.endswith(".h5ad"):
                    f.save(
                        os.path.join(
                            full_upload_path, secure_filename(f.filename)
                        )
                    )
                    return redirect("/filecrawl.html", code=302)
                else:
                    raise CellxgeneException(
                        "Uploaded file must be in anndata (.h5ad) format.",
                        status.HTTP_400_BAD_REQUEST,
                    )
            else:
                raise CellxgeneException(
                    "A file must be chosen to upload.",
                    status.HTTP_400_BAD_REQUEST,
                )
    else:
        raise CellxgeneException(
            "Invalid directory.", status.HTTP_400_BAD_REQUEST
        )
    # Reached only for non-POST requests on a valid directory.
    return redirect(location, code=302)


if env.enable_upload:
    app.add_url_rule("/make_user", "make_user", make_user, methods=["POST"])
    app.add_url_rule(
        "/make_subdir", "make_subdir", make_subdir, methods=["POST"]
    )
    app.add_url_rule(
        "/upload_file", "upload_file", upload_file, methods=["POST"]
    )


def set_no_cache(resp):
    """Mark a response as non-cacheable and return it."""
    resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    resp.headers["Pragma"] = "no-cache"
    resp.headers["Expires"] = "0"
    # NOTE(review): this overwrites the Cache-Control value set above — the
    # effective header is "public, max-age=0". Kept as-is to preserve the
    # emitted headers; confirm which policy is intended.
    resp.headers["Cache-Control"] = "public, max-age=0"
    return resp


@app.route("/filecrawl.html")
def filecrawl():
    """Render the full directory listing of the data root."""
    entries = recurse_dir(env.cellxgene_data)
    rendered_html = render_entries(entries)
    resp = make_response(
        render_template(
            "filecrawl.html",
            extra_scripts=get_extra_scripts(),
            rendered_html=rendered_html,
        )
    )
    return set_no_cache(resp)


@app.route("/filecrawl/<path:path>")
def do_filecrawl(path):
    """Render the directory listing of a subdirectory of the data root."""
    filecrawl_path = os.path.join(env.cellxgene_data, path)
    if not os.path.isdir(filecrawl_path):
        raise CellxgeneException(
            "Path is not directory: " + filecrawl_path,
            status.HTTP_400_BAD_REQUEST,
        )
    entries = recurse_dir(filecrawl_path)
    rendered_html = render_entries(entries)
    return render_template(
        "filecrawl.html",
        extra_scripts=get_extra_scripts(),
        rendered_html=rendered_html,
        path=path,
    )


# Serializes cache lookup + entry creation so concurrent requests for the
# same dataset launch only one backend process.
entry_lock = Lock()


@app.route("/view/<path:path>", methods=["GET", "PUT", "POST"])
def do_view(path):
    """Proxy a request to the cellxgene backend for the dataset encoded in path,
    launching (and caching) the backend process on first access."""
    key = get_key(path)
    print(
        f"view path={path}, dataset={key.dataset}, annotation_file= {key.annotation_file}, key={key.pathpart}"
    )
    with entry_lock:
        match = cache.check_entry(key)
        if match is None:
            uascripts = get_extra_scripts()
            match = cache.create_entry(key, uascripts)

    # Touch the entry so the pruner sees it as recently used.
    match.timestamp = current_time_stamp()

    if (
        match.status == CacheEntryStatus.loaded
        or match.status == CacheEntryStatus.loading
    ):
        return match.serve_content(path)
    elif match.status == CacheEntryStatus.error:
        raise ProcessException.from_cache_entry(match)
    # Any other status falls through and returns None (Flask 500);
    # preserved from the original control flow.


@app.route("/cache_status", methods=["GET"])
def do_GET_status():
    """Human-readable view of the backend process cache."""
    return render_template("cache_status.html", entry_list=cache.entry_list)


@app.route("/cache_status.json", methods=["GET"])
def do_GET_status_json():
    """JSON view of the backend process cache."""
    return json.dumps(
        {
            "launchtime": app.launchtime,
            "entry_list": [
                {
                    "dataset": entry.key.dataset,
                    "annotation_file": entry.key.annotation_file,
                    "launchtime": entry.launchtime,
                    "last_access": entry.timestamp,
                    "status": entry.status,
                }
                for entry in cache.entry_list
            ],
        }
    )


@app.route("/relaunch/<path:path>", methods=["GET"])
def do_relaunch(path):
    """Terminate any cached backend for path, then redirect to /view to relaunch it."""
    key = get_key(path)
    match = cache.check_entry(key)
    if match is not None:
        match.terminate()
    qs = request.query_string.decode()
    return redirect(
        url_for("do_view", path=path) + (f"?{qs}" if len(qs) > 0 else ""),
        code=302,
    )


@app.route("/terminate/<path:path>", methods=["GET"])
def do_terminate(path):
    """Terminate any cached backend for path and show the cache status page."""
    key = get_key(path)
    match = cache.check_entry(key)
    if match is not None:
        match.terminate()
    return redirect(url_for("do_GET_status"), code=302)


@app.route("/metadata/ip_address", methods=["GET"])
def ip_address():
    """Return the gateway host IP (never cached)."""
    resp = make_response(env.ip)
    return set_no_cache(resp)


def main():
    """Validate the environment, start the cache pruner, and run the app."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s:%(name)s:%(levelname)s:%(message)s",
    )
    env.validate()
    pruner = PruneProcessCache(cache)
    background_thread = Thread(target=pruner)
    background_thread.start()
    app.launchtime = current_time_stamp()
    app.run(host="0.0.0.0", port=env.gateway_port, debug=False)


if __name__ == "__main__":
    main()
cleaner.py
# Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vgaronne@gmail.com>, 2014-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE

"""
Judge-Cleaner is a daemon to clean expired replication rules.
"""

import logging
import os
import socket
import sys
import threading
import time
import traceback

from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint

from sqlalchemy.exc import DatabaseError

from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import delete_rule, get_expired_rules
from rucio.core.monitor import record_counter
from rucio.db.sqla.util import get_db_time

# Set by stop() (e.g. from a signal handler) to request a graceful shutdown
# of every worker thread.
graceful_stop = threading.Event()

logging.basicConfig(stream=sys.stdout, level=getattr(logging, config_get('common', 'loglevel', raise_exception=False, default='DEBUG').upper()), format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')


def rule_cleaner(once=False):
    """
    Main loop to check for expired replication rules
    """

    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()

    # Rules whose deletion hit a row-lock conflict, mapped to the time until
    # which they are skipped: {rule_id: datetime}
    paused_rules = {}  # {rule_id: datetime}

    # Make an initial heartbeat so that all judge-cleaners have the correct worker number on the next try
    live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
    graceful_stop.wait(1)

    while not graceful_stop.is_set():
        try:
            # heartbeat — also yields this worker's thread assignment used to
            # partition the expired-rule workload across daemons.
            heartbeat = live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)

            start = time.time()

            # Refresh paused rules: drop entries whose pause window has elapsed.
            # Iterate over a copy because the dict is mutated during the loop.
            iter_paused_rules = deepcopy(paused_rules)
            for key in iter_paused_rules:
                if datetime.utcnow() > paused_rules[key]:
                    del paused_rules[key]

            # Fetch this worker's slice of expired rules, excluding paused ones.
            rules = get_expired_rules(total_workers=heartbeat['nr_threads'] - 1,
                                      worker_number=heartbeat['assign_thread'],
                                      limit=200,
                                      blacklisted_rules=[key for key in paused_rules])
            logging.debug('rule_cleaner[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(rules)))

            if not rules and not once:
                # No work: back off for a minute (interruptible via graceful_stop).
                logging.debug('rule_cleaner[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_rules))))
                graceful_stop.wait(60)
            else:
                for rule in rules:
                    rule_id = rule[0]
                    rule_expression = rule[1]
                    logging.info('rule_cleaner[%s/%s]: Deleting rule %s with expression %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, rule_expression))
                    if graceful_stop.is_set():
                        break
                    try:
                        start = time.time()
                        delete_rule(rule_id=rule_id, nowait=True)
                        logging.debug('rule_cleaner[%s/%s]: deletion of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, time.time() - start))
                    except (DatabaseException, DatabaseError, UnsupportedOperation) as e:
                        # ORA-00054: row locked by another session — pause this
                        # rule for a random 10-40 minutes and move on.
                        if match('.*ORA-00054.*', str(e.args[0])):
                            paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
                            record_counter('rule.judge.exceptions.LocksDetected')
                            logging.warning('rule_cleaner[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
                        elif match('.*QueuePool.*', str(e.args[0])):
                            # Connection-pool exhaustion: transient, warn only.
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        elif match('.*ORA-03135.*', str(e.args[0])):
                            # ORA-03135: lost DB connection — transient, warn only.
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        else:
                            logging.error(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                    except RuleNotFound as e:
                        # Rule vanished between fetch and delete; nothing to do.
                        pass
        except (DatabaseException, DatabaseError) as e:
            # Same transient-vs-fatal classification as above, but for errors
            # raised outside the per-rule loop (e.g. during the fetch).
            if match('.*QueuePool.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            elif match('.*ORA-03135.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            else:
                logging.critical(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except Exception as e:
            logging.critical(traceback.format_exc())
            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        if once:
            break

    # Unregister this worker's heartbeat on shutdown.
    die(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)


def stop(signum=None, frame=None):
    """
    Graceful exit.
    """

    graceful_stop.set()


def run(once=False, threads=1):
    """
    Starts up the Judge-Clean threads.
    """

    # Refuse to start if client and DB clocks disagree by more than ~1 hour,
    # since rule expiry comparisons depend on consistent time.
    client_time, db_time = datetime.utcnow(), get_db_time()
    max_offset = timedelta(hours=1, seconds=10)
    if type(db_time) is datetime:
        if db_time - client_time > max_offset or client_time - db_time > max_offset:
            logging.critical('Offset between client and db time too big. Stopping Cleaner')
            return

    hostname = socket.gethostname()
    sanity_check(executable='rucio-judge-cleaner', hostname=hostname)

    if once:
        rule_cleaner(once)
    else:
        logging.info('Cleaner starting %s threads' % str(threads))
        # NOTE: rebinds `threads` from the int parameter to the thread list.
        threads = [threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in range(0, threads)]
        [t.start() for t in threads]
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
EventLoopTest.py
########################################################################## # # Copyright (c) 2011-2012, John Haddon. All rights reserved. # Copyright (c) 2012, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

import unittest
import threading
import time
import functools

import IECore

import Gaffer
import GafferUI
import GafferUITest

from Qt import QtCore
from Qt import QtWidgets

class EventLoopTest( GafferUITest.TestCase ) :

	def testIdleCallbacks( self ) :

		self.__idleCalls = 0

		# Idle callbacks are re-invoked while they return True and removed
		# once they return False.
		def idle() :
			self.__idleCalls += 1
			return self.__idleCalls < 2

		def stop() :
			if self.__idleCalls==2 :
				GafferUI.EventLoop.mainEventLoop().stop()
				return False
			return True

		GafferUI.EventLoop.addIdleCallback( idle )
		GafferUI.EventLoop.addIdleCallback( stop )
		GafferUI.EventLoop.mainEventLoop().start()

		self.assertEqual( self.__idleCalls, 2 )

	def testWaitForIdle( self ) :

		self.__idleCalls = 0
		def idle( total ) :
			self.__idleCalls += 1
			return self.__idleCalls < total

		GafferUI.EventLoop.addIdleCallback( functools.partial( idle, 1000 ) )
		GafferUI.EventLoop.waitForIdle()
		self.assertEqual( self.__idleCalls, 1000 )

		GafferUI.EventLoop.addIdleCallback( functools.partial( idle, 1005 ) )
		GafferUI.EventLoop.waitForIdle( 5 )
		self.assertEqual( self.__idleCalls, 1005 )

	def testExecuteOnUITheadAndWaitForResult( self ) :

		def f() :

			GafferUI.EventLoop.mainEventLoop().stop()

			self.__uiThreadFunctionCalled = True
			self.__uiThreadCalledOnCorrectThread = QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread()
			return 101

		def t() :

			self.__uiThreadResult = GafferUI.EventLoop.executeOnUIThread( f, waitForResult=True )

		thread = threading.Thread( target = t )
		GafferUI.EventLoop.addIdleCallback( thread.start )
		GafferUI.EventLoop.mainEventLoop().start()
		thread.join()

		self.assertEqual( self.__uiThreadFunctionCalled, True )
		self.assertEqual( self.__uiThreadCalledOnCorrectThread, True )
		self.assertEqual( self.__uiThreadResult, 101 )

	def testExecuteOnUITheadAndDontWaitForResult( self ) :

		def f() :

			time.sleep( 2 )

			GafferUI.EventLoop.mainEventLoop().stop()

			self.__uiThreadFunctionCalled = True
			self.__uiThreadCalledOnCorrectThread = QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread()
			return 101

		def t() :

			# time.perf_counter() replaces time.clock(), which was deprecated
			# in Python 3.3 and removed in Python 3.8.
			st = time.perf_counter()
			self.__uiThreadResult = GafferUI.EventLoop.executeOnUIThread( f, waitForResult=False )
			self.__executeOnUIThreadDuration = time.perf_counter() - st

		thread = threading.Thread( target = t )
		GafferUI.EventLoop.addIdleCallback( thread.start )
		GafferUI.EventLoop.mainEventLoop().start()
		thread.join()

		self.assertEqual( self.__uiThreadFunctionCalled, True )
		self.assertEqual( self.__uiThreadCalledOnCorrectThread, True )
		self.assertEqual( self.__uiThreadResult, None )
		# we shouldn't be waiting for the result of ui thread, so the return should be quicker
		# than the actual function called
		self.assertLess( self.__executeOnUIThreadDuration, 2 )

	def testExceptionsInIdleCallbacks( self ) :

		self.__idle1Calls = 0
		self.__idle2Calls = 0

		def idle1() :
			self.__idle1Calls += 1
			raise RuntimeError( "I am a very naughty boy" )

		def idle2() :
			self.__idle2Calls += 1
			return True

		def stop() :
			if self.__idle2Calls==4 :
				GafferUI.EventLoop.mainEventLoop().stop()
				return False
			return True

		GafferUI.EventLoop.addIdleCallback( idle1 )
		GafferUI.EventLoop.addIdleCallback( idle2 )
		GafferUI.EventLoop.addIdleCallback( stop )

		mh = IECore.CapturingMessageHandler()
		with mh :
			GafferUI.EventLoop.mainEventLoop().start()

		# A raising callback is removed after its first call; the others keep running.
		self.assertEqual( self.__idle1Calls, 1 )
		self.assertGreaterEqual( self.__idle2Calls, 4 )
		self.assertEqual( len( mh.messages ), 1 )
		self.assertEqual( mh.messages[0].level, IECore.Msg.Level.Error )
		self.assertIn( "I am a very naughty boy", mh.messages[0].message )

	def testExecuteOnUITheadFromUIThread( self ) :

		# if we're on the ui thread already when we call executeOnUIThread(),
		# then our function should be called immediately.

		self.__executed = False
		def f() :
			self.__executed = True
			return 10

		r = GafferUI.EventLoop.executeOnUIThread( f )

		self.assertEqual( r, 10 )
		self.assertEqual( self.__executed, True )

	def testAddIdleCallbackFromIdleCallback( self ) :

		self.__runOnceCalls = 0
		self.__addRunOnceCalls = 0

		def runOnce() :
			self.__runOnceCalls += 1
			return False # so we're removed immediately

		def addRunOnce() :
			self.__addRunOnceCalls += 1
			if self.__addRunOnceCalls==2 :
				GafferUI.EventLoop.mainEventLoop().stop()
				return False
			GafferUI.EventLoop.mainEventLoop().addIdleCallback( runOnce )
			return True

		GafferUI.EventLoop.addIdleCallback( runOnce )
		GafferUI.EventLoop.addIdleCallback( addRunOnce )
		GafferUI.EventLoop.mainEventLoop().start()

		self.assertEqual( self.__runOnceCalls, 2 )
		self.assertEqual( self.__addRunOnceCalls, 2 )

	def setUp( self ) :

		GafferUITest.TestCase.setUp( self )

		self.__uiThreadFunctionCalled = False
		self.__uiThreadCalledOnCorrectThread = False
		self.__uiThreadResult = None
		self.__executeOnUIThreadDuration = 10000

if __name__ == "__main__":
	unittest.main()
app_test.py
from __future__ import print_function from __future__ import unicode_literals import re import socket import subprocess import ttfw_idf import time import netifaces from threading import Thread, Event def run_server(server_stop, port, server_ip, client_ip): print("Starting PPP server on port: {}".format(port)) try: arg_list = ['pppd', port, '115200', '{}:{}'.format(server_ip, client_ip), 'modem', 'local', 'noauth', 'debug', 'nocrtscts', 'nodetach', '+ipv6'] p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, bufsize=1) while not server_stop.is_set(): if p.poll() is not None: raise ValueError('ENV_TEST_FAILURE: PPP terminated unexpectedly with {}'.format(p.poll())) line = p.stdout.readline() if line: print("[PPPD:]{}".format(line.rstrip())) time.sleep(0.1) except Exception as e: print(e) raise ValueError('ENV_TEST_FAILURE: Error running PPP server') finally: p.terminate() print("PPP server stopped") @ttfw_idf.idf_custom_test(env_tag="Example_PPP", group="test-apps") def test_examples_protocol_pppos_connect(env, extra_data): """ steps: 1. starts PPP server 2. get DUT as PPP client to connect to the server 3. 
check TCP client-server connection between client-server """ dut1 = env.get_dut("pppos_connect_test", "tools/test_apps/protocols/pppos", dut_class=ttfw_idf.ESP32DUT) # Look for test case symbolic names try: server_ip = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_PPP_SERVER_IP"].replace('"','') client_ip = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_PPP_CLIENT_IP"].replace('"','') port_nr = dut1.app.get_sdkconfig()["CONFIG_TEST_APP_TCP_PORT"] except Exception: print('ENV_TEST_FAILURE: Some mandatory configuration not found in sdkconfig') raise print("Starting the test on {}".format(dut1)) dut1.start_app() # the PPP test env uses two ttyUSB's: one for ESP32 board, another one for ppp server # use the other port for PPP server than the DUT/ESP port = '/dev/ttyUSB0' if dut1.port == '/dev/ttyUSB1' else '/dev/ttyUSB1' # Start the PPP server server_stop = Event() t = Thread(target=run_server, args=(server_stop, port, server_ip, client_ip)) t.start() try: ppp_server_timeout = time.time() + 30 while "ppp0" not in netifaces.interfaces(): print("PPP server haven't yet setup its netif, list of active netifs:{}".format(netifaces.interfaces())) time.sleep(0.5) if time.time() > ppp_server_timeout: raise ValueError("ENV_TEST_FAILURE: PPP server failed to setup ppp0 interface within timeout") ip6_addr = dut1.expect(re.compile(r"Got IPv6 address (\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4}\:\w{4})"), timeout=30)[0] print("IPv6 address of ESP: {}".format(ip6_addr)) dut1.expect(re.compile(r"Socket listening")) print("Starting the IPv6 test...") # Connect to TCP server on ESP using IPv6 address for res in socket.getaddrinfo(ip6_addr + "%ppp0", int(port_nr), socket.AF_INET6, socket.SOCK_STREAM, socket.SOL_TCP): af, socktype, proto, canonname, addr = res sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.connect(addr) sock.sendall(b"Espressif") sock.close() dut1.expect(re.compile(r"IPv6 test passed")) print("IPv6 test passed!") print("Starting the IPv4 test...") # Start the 
TCP server and wait for the ESP to connect with IPv4 address try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('', int(port_nr))) sock.listen(1) conn, addr = sock.accept() except socket.error as msg: print('Socket error: ' + str(msg[0]) + ': ' + msg[1]) raise timeout = time.time() + 60 while time.time() < timeout: data = conn.recv(128) if not data: break data = data.decode() print('Received data: ' + data) if data.startswith('Espressif'): conn.send(data.encode()) break conn.close() dut1.expect(re.compile(r"IPv4 test passed")) print("IPv4 test passed!") finally: server_stop.set() t.join() if __name__ == '__main__': test_examples_protocol_pppos_connect()
learn.py
#!/usr/bin/python3 import json import csv from random import shuffle import warnings import pickle import zlib import gzip import operator import time # import logging import math from io import StringIO from threading import Thread import functools import base64 import redis import multiprocessing from log import NewLogger logger = NewLogger("learn") import numpy from sklearn.feature_extraction import DictVectorizer from sklearn.pipeline import make_pipeline from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn import cluster, mixture from sklearn.neighbors import kneighbors_graph from naive_bayes import ExtendedNaiveBayes from naive_bayes2 import ExtendedNaiveBayes2 def timeout(timeout): def deco(func): @functools.wraps(func) def wrapper(*args, **kwargs): res = [Exception('function [%s] timeout [%s seconds] exceeded!' 
% ( func.__name__, timeout))] def newFunc(): try: res[0] = func(*args, **kwargs) except Exception as e: res[0] = e t = Thread(target=newFunc) t.daemon = True try: t.start() t.join(timeout) except Exception as je: raise je ret = res[0] if isinstance(ret, BaseException): raise ret return ret return wrapper return deco class AI(object): def __init__(self, family): # self.logger = logging.getLogger('learn.AI') self.logger = NewLogger("learn.AI") self.naming = {'from': {}, 'to': {}} self.family = family # self.path_to_data = path_to_data def classify(self, sensor_data): # print(self.header) # print(sensor_data) header = self.header[1:] is_unknown = True csv_data = numpy.zeros(len(header)) for sensorType in sensor_data['s']: if sensor_data['s'][sensorType]: for sensor in sensor_data['s'][sensorType]: sensorName = sensorType + "-" + sensor # print(sensorName, self.header) if sensorName in header: is_unknown = False csv_data[header.index(sensorName)] = sensor_data['s'][sensorType][sensor] self.headerClassify = header self.csv_dataClassify = csv_data.reshape(1, -1) payload = {'location_names': self.naming['to'], 'predictions': []} threads = [None]*len(self.algorithms) self.results = [None]*len(self.algorithms) for i, alg in enumerate(self.algorithms.keys()): threads[i] = Thread(target=self.do_classification, args=(i, alg)) threads[i].start() for i, _ in enumerate(self.algorithms.keys()): threads[i].join() # print(csv_data) # print(self.results) for result in self.results: if result != None: payload['predictions'].append(result) payload['is_unknown'] = is_unknown return payload def do_classification(self, index, name): """ header = ['wifi-a', 'wifi-b'] csv_data = [-67 0] """ if name == 'Gaussian Process': return t = time.time() try: prediction = self.algorithms[ name].predict_proba(self.csv_dataClassify) except Exception as e: logger.error(self.csv_dataClassify) logger.error(str(e)) return predict = {} for i, pred in enumerate(prediction[0]): predict[i] = pred 
predict_payload = {'name': name, 'locations': [], 'probabilities': []} badValue = False for tup in sorted(predict.items(), key=operator.itemgetter(1), reverse=True): predict_payload['locations'].append(str(tup[0])) predict_payload['probabilities'].append( round(float(tup[1]), 2)) if math.isnan(tup[1]): badValue = True break if badValue: return # try: # t2 = time.time() # name = "Extended Naive Bayes" # clf = ExtendedNaiveBayes(self.family,path_to_data=self.path_to_data) # predictions = clf.predict_proba(header,csv_data) # predict_payload = {'name': name,'locations': [], 'probabilities': []} # for tup in predictions: # predict_payload['locations'].append(str(self.naming['from'][tup[0]])) # predict_payload['probabilities'].append(round(tup[1],2)) # payload['predictions'].append(predict_payload) # self.logger.debug("{} {:d} ms".format(name,int(1000 * (t2 - time.time())))) # except Exception as e: # self.logger.error(str(e)) # try: # t2 = time.time() # name = "Extended Naive Bayes2" # clf = ExtendedNaiveBayes2(self.family, path_to_data=self.path_to_data) # predictions = clf.predict_proba(header, csv_data) # predict_payload = {'name': name, 'locations': [], 'probabilities': []} # for tup in predictions: # predict_payload['locations'].append( # str(self.naming['from'][tup[0]])) # predict_payload['probabilities'].append(round(tup[1], 2)) # payload['predictions'].append(predict_payload) # self.logger.debug("{} {:d} ms".format( # name, int(1000 * (t2 - time.time())))) # except Exception as e: # self.logger.error(str(e)) # self.logger.debug("{} {:d} ms".format( # name, int(1000 * (t - time.time())))) self.results[index] = predict_payload @timeout(10) def train(self, clf, x, y): return clf.fit(x, y) def learn(self, fname, file_data=None): csvfile = None if file_data: # base64 and gzipped file data = base64.b64decode(file_data) # data = zlib.decompress(data, 16 + zlib.MAX_WBITS) data = gzip.decompress(data) csvfile = StringIO(data.decode('utf-8')) else: csvfile = open(fname, 
'r') t = time.time() # load CSV file self.header = [] rows = [] naming_num = 0 # with open(fname, 'r') as csvfile: reader = csv.reader(csvfile, delimiter=',') for i, row in enumerate(reader): self.logger.debug(row) if i == 0: self.header = row else: for j, val in enumerate(row): if j == 0: # this is a name of the location if val not in self.naming['from']: self.naming['from'][val] = naming_num self.naming['to'][naming_num] = val naming_num += 1 row[j] = self.naming['from'][val] continue if val == '': row[j] = 0 continue try: row[j] = float(val) except: self.logger.error( "problem parsing value " + str(val)) rows.append(row) csvfile.close() # first column in row is the classification, Y y = numpy.zeros(len(rows)) x = numpy.zeros((len(rows), len(rows[0]) - 1)) # shuffle it up for training record_range = list(range(len(rows))) shuffle(record_range) for i in record_range: y[i] = rows[i][0] x[i, :] = numpy.array(rows[i][1:]) names = [ "Nearest Neighbors", "Linear SVM", "RBF SVM", # "Gaussian Process", "Decision Tree", "Random Forest", "Neural Net", "AdaBoost", "Naive Bayes", "QDA"] classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025, probability=True), SVC(gamma=2, C=1, probability=True), # GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True), DecisionTreeClassifier(max_depth=5), RandomForestClassifier( max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1), AdaBoostClassifier(), GaussianNB(), QuadraticDiscriminantAnalysis()] self.algorithms = {} # split_for_learning = int(0.70 * len(y)) for name, clf in zip(names, classifiers): t2 = time.time() self.logger.debug("learning {}".format(name)) try: self.algorithms[name] = self.train(clf, x, y) # score = self.algorithms[name].score(x,y) # logger.debug(name, score) self.logger.debug("learned {}, {:d} ms".format( name, int(1000 * (t2 - time.time())))) except Exception as e: self.logger.error("{} {}".format(name, str(e))) self.logger.debug("{:d} ms".format(int(1000 * (t - time.time())))) 
def save(self, save_file, redis_cache=False):
    """Persist the trained model state to a gzipped pickle file.

    Writes, in order: header, naming, algorithms, family. `load` must
    read them back in the same order.

    :param save_file: path of the gzip file to write
    :param redis_cache: unused flag; kept for interface compatibility
        (NOTE(review): currently only printed — confirm intent)
    """
    if redis_cache:
        print(redis_cache)
    t = time.time()
    # context manager guarantees the file is closed even if a dump fails
    with gzip.open(save_file, 'wb') as f:
        pickle.dump(self.header, f)
        pickle.dump(self.naming, f)
        pickle.dump(self.algorithms, f)
        pickle.dump(self.family, f)
    # BUG FIX: elapsed time was computed as t - time.time(), which is
    # always negative; correct order is time.time() - t.
    self.logger.debug("{:d} ms".format(int(1000 * (time.time() - t))))

def load(self, save_file, redis_cache=False):
    """Restore model state previously written by :meth:`save`.

    Loads header, naming, algorithms and family in the exact order
    `save` wrote them.

    :param save_file: path of the gzip file to read
    :param redis_cache: unused flag; kept for interface compatibility
    """
    if redis_cache:
        print(redis_cache)
    t = time.time()
    with gzip.open(save_file, 'rb') as f:
        self.header = pickle.load(f)
        self.naming = pickle.load(f)
        self.algorithms = pickle.load(f)
        self.family = pickle.load(f)
    # BUG FIX: same negative-elapsed-time bug as in save().
    self.logger.debug("{:d} ms".format(int(1000 * (time.time() - t))))
client.py
import socket
import threading
import random

# Client-side chat/QKD demo: clientA carries the chat protocol, clientB is a
# second channel opened on a server-provided port for basis/key exchange.
clientA = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientA.connect(("129.236.142.190", 1234))
clientB = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
stop_thread = False
# Two key-exchange protocol names; one is picked at random by ALICE.
methods = ['E91', 'BB84']
nickname = input("Choose a nickname: ")
if nickname == "admin":
    # NOTE(review): "paswword" is a typo in the user-facing prompt; the
    # password is read but never sent (the PASS handshake below is
    # commented out).
    password = input("Enter paswword for admin: ")
#basis = input("#Please input your basis")


def receive():
    """Background loop: read server messages on clientA and react.

    A message starting with "NICK" triggers the handshake: send our
    nickname, then connect clientB to the port embedded after "NICK".
    "BASIS" messages are swallowed; everything else is printed.
    Any exception closes both sockets and stops the loop.
    """
    while True:
        global stop_thread
        if stop_thread:
            break
        try:
            msg = message = clientA.recv(1024).decode('ascii')
            if msg.startswith("NICK"):
                clientA.send(nickname.encode('ascii'))
                # port = int(clientA.recv(1024).decode('ascii'))
                # Port for the second channel is packed after "NICK".
                port = int(msg[4:])
                #print(port)
                clientB.connect(("129.236.142.190", port))
                #clientB.send(nickname.encode('ascii'))
                #if next_message == 'PASS':
                #    clientA.send(password.encode('ascii'))
                #    if clientA.recv(1024).decode('ascii') == 'REFUSE':
                #        print("Connection was refused! Wrong passsword!")
                #        stop_thread = True
                #elif next_message == 'BAN':
                #    print("Connection refused because of ban")
                #    clientA.close()
                #    stop_thread = True
            if msg == "BASIS":
                pass
            else:
                print(message)
        except:
            # NOTE(review): bare except hides the real error; any failure
            # tears down both sockets and ends the receiver thread.
            print("An error occurred!")
            clientA.close()
            clientB.close()
            break


def write():
    """Foreground loop: read stdin and dispatch commands or chat text.

    Lines starting with '/' (after the "<nick>: " prefix) are commands:
    EXIT goes to the server on clientA; otherwise ALICE sends a random
    method plus bitstring/basis, and BOB sends his basis, on clientB.
    Plain lines are sent as chat on clientA.
    """
    while True:
        if stop_thread:
            break
        message = '<{}>: {}'.format(nickname, input(''))
        if message[len(nickname)+4:].startswith('/'):
            if message[len(nickname)+5:].startswith('EXIT'):
                clientA.send(f'EXIT {message[1:len(nickname)+1]}'.encode('ascii'))
            elif(nickname=='ALICE'):
                # Pick E91 or BB84 with one random bit.
                method = methods[random.getrandbits(1)]
                clientB.send(f'BASIS|ALICE\nMETHOD {method}\n{message[1:len(nickname)+1]} bitstring {rand_key(10)}\n{message[1:len(nickname)+1]} basis {rand_key(10)}'.encode('ascii'))
            elif(nickname=='BOB'):
                clientB.send(f'KEY|{message[1:len(nickname)+1]}\nBOB basis {rand_key(10)}'.encode('ascii'))
            #else:
                #clientB.send(f'INTERFACE {nickname+"|"+message[len(nickname)+5:]}'.encode('ascii'))
        else:
            clientA.send(message.encode('ascii'))


def rand_key(p):
    """Return a random binary string of length p (e.g. '0110100101')."""
    # Variable to store the
    # string
    key1 = ""
    # Loop to find the string
    # of desired length
    for i in range(p):
        # randint function to generate
        # 0, 1 randomly and converting
        # the result into str
        temp = str(random.randint(0, 1))
        # Concatenation the random 0, 1
        # to the final result
        key1 += temp
    return(key1)

#n = 7
#str1 = rand_key(n)
#print("Desired length random binary string is: ", str1)

# Writer runs in a thread alongside the receiver.
write_thread = threading.Thread(target = write)
write_thread.start()
receive_thread = threading.Thread(target = receive)
receive_thread.start()
8.flask_multiple_session_impliment.py
import subprocess
from selenium.webdriver import Chrome
import pywebio
import template
import time
import util
from pywebio.input import *
from pywebio.output import *
from pywebio.utils import to_coroutine, run_as_function


def target():
    """Synchronous PyWebIO app under test: run the shared output/input templates."""
    template.basic_output()
    template.background_output()
    run_as_function(template.basic_input())
    actions(buttons=['Continue'])
    template.background_input()


async def async_target():
    """Coroutine variant of `target`, exercising the async PyWebIO session API."""
    template.basic_output()
    await template.coro_background_output()
    await to_coroutine(template.basic_input())
    await actions(buttons=['Continue'])
    await template.coro_background_input()


def test(server_proc: subprocess.Popen, browser: Chrome):
    """Drive the browser against both sessions and snapshot each page.

    First round hits the default `/io` (sync) endpoint; then the browser
    is pointed at the `/io2` (async) endpoint and the same checks rerun.
    """
    template.test_output(browser)
    time.sleep(1)
    template.test_input(browser)
    time.sleep(1)
    template.save_output(browser, '8.flask_multiple_session_impliment_p1.html')

    # Switch to the async endpoint (pywebio_api=io2) for the second pass.
    browser.get('http://localhost:8080?_pywebio_debug=1&pywebio_api=io2&_pywebio_http_pull_interval=400')
    template.test_output(browser)
    time.sleep(1)
    template.test_input(browser)
    time.sleep(1)
    template.save_output(browser, '8.flask_multiple_session_impliment_p2.html')


def start_test_server():
    """Start a Flask app exposing both sync (/io) and async (/io2) PyWebIO views.

    The PyWebIO event loop runs in a daemon thread; Flask serves the
    static frontend and the two webio endpoints on 127.0.0.1:8080.
    """
    pywebio.enable_debug()
    from flask import Flask, send_from_directory
    from pywebio.platform.flask import webio_view, run_event_loop
    from pywebio import STATIC_PATH
    import threading
    import logging

    app = Flask(__name__)
    app.add_url_rule('/io', 'webio_view', webio_view(target),
                     methods=['GET', 'POST', 'OPTIONS'])
    app.add_url_rule('/io2', 'webio_view_async_target', webio_view(async_target),
                     methods=['GET', 'POST', 'OPTIONS'])

    @app.route('/')
    @app.route('/<path:static_file>')
    def serve_static_file(static_file='index.html'):
        return send_from_directory(STATIC_PATH, static_file)

    threading.Thread(target=run_event_loop, daemon=True).start()
    # Quiet werkzeug's per-request logging during the test run.
    logging.getLogger('werkzeug').setLevel(logging.WARNING)
    app.run(port=8080, host='127.0.0.1')


if __name__ == '__main__':
    util.run_test(start_test_server, test,
                  address='http://localhost:8080?_pywebio_debug=1&_pywebio_http_pull_interval=400')
TServer.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

from six.moves import queue
import os
import sys
import threading
import traceback

import logging
logger = logging.getLogger(__name__)

from thrift.Thrift import TProcessor
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport


class TServer:
    """Base interface for a server, which must have a serve() method.

    Three constructors for all servers:
    1) (processor, serverTransport)
    2) (processor, serverTransport, transportFactory, protocolFactory)
    3) (processor, serverTransport,
        inputTransportFactory, outputTransportFactory,
        inputProtocolFactory, outputProtocolFactory)
    """

    def __init__(self, *args):
        # Dispatch on arity: 4-arg form reuses one transport/protocol
        # factory pair for both input and output sides.
        if (len(args) == 2):
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif (len(args) == 4):
            self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
        elif (len(args) == 6):
            self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])

    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        self.processor = processor
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory

    def serve(self):
        pass


class TSimpleServer(TServer):
    """Simple single-threaded server that just pumps around one transport."""

    def __init__(self, *args):
        TServer.__init__(self, *args)

    def serve(self):
        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            itrans = self.inputTransportFactory.getTransport(client)
            otrans = self.outputTransportFactory.getTransport(client)
            iprot = self.inputProtocolFactory.getProtocol(itrans)
            oprot = self.outputProtocolFactory.getProtocol(otrans)
            try:
                # Process requests until the client disconnects
                # (signalled by TTransportException).
                while True:
                    self.processor.process(iprot, oprot)
            except TTransport.TTransportException as tx:
                pass
            except Exception as x:
                logger.exception(x)

            itrans.close()
            otrans.close()


class TThreadedServer(TServer):
    """Threaded server that spawns a new thread per each connection."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # daemon=True lets the process exit without waiting for handlers.
        self.daemon = kwargs.get("daemon", False)

    def serve(self):
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                t = threading.Thread(target=self.handle, args=(client,))
                t.setDaemon(self.daemon)
                t.start()
            except KeyboardInterrupt:
                raise
            except Exception as x:
                logger.exception(x)

    def handle(self, client):
        """Serve one client connection on its own thread until it closes."""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            pass
        except Exception as x:
            logger.exception(x)

        itrans.close()
        otrans.close()


class TThreadPoolServer(TServer):
    """Server with a fixed size pool of threads which service requests."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        self.clients = queue.Queue()
        self.threads = 10
        self.daemon = kwargs.get("daemon", False)

    def setNumThreads(self, num):
        """Set the number of worker threads that should be created"""
        # NOTE: must be called before serve(); threads are spawned there.
        self.threads = num

    def serveThread(self):
        """Loop around getting clients from the shared queue and process them."""
        while True:
            try:
                client = self.clients.get()
                self.serveClient(client)
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            pass
        except Exception as x:
            logger.exception(x)

        itrans.close()
        otrans.close()

    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""
        for i in range(self.threads):
            try:
                t = threading.Thread(target=self.serveThread)
                t.setDaemon(self.daemon)
                t.start()
            except Exception as x:
                logger.exception(x)

        # Pump the socket for clients
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.clients.put(client)
            except Exception as x:
                logger.exception(x)


class TForkingServer(TServer):
    """A Thrift server that forks a new process for each request

    This is more scalable than the threaded server as it does not cause
    GIL contention.

    Note that this has different semantics from the threading server.
    Specifically, updates to shared variables will no longer be shared.
    It will also not work on windows.

    This code is heavily inspired by SocketServer.ForkingMixIn in the
    Python stdlib.
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        # PIDs of live child processes; reaped in collect_children().
        self.children = []

    def serve(self):
        def try_close(file):
            try:
                file.close()
            except IOError as e:
                logger.warning(e, exc_info=True)

        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            try:
                pid = os.fork()

                if pid:  # parent
                    # add before collect, otherwise you race w/ waitpid
                    self.children.append(pid)
                    self.collect_children()

                    # Parent must close socket or the connection may not get
                    # closed promptly
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    try_close(itrans)
                    try_close(otrans)
                else:
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)

                    iprot = self.inputProtocolFactory.getProtocol(itrans)
                    oprot = self.outputProtocolFactory.getProtocol(otrans)

                    ecode = 0
                    try:
                        try:
                            while True:
                                self.processor.process(iprot, oprot)
                        except TTransport.TTransportException:
                            pass
                        except Exception as e:
                            logger.exception(e)
                            ecode = 1
                    finally:
                        try_close(itrans)
                        try_close(otrans)

                    # Child must _exit (not exit) to skip parent cleanup.
                    os._exit(ecode)

            except TTransport.TTransportException:
                pass
            except Exception as x:
                logger.exception(x)

    def collect_children(self):
        """Reap any exited child processes without blocking (WNOHANG)."""
        while self.children:
            try:
                pid, status = os.waitpid(0, os.WNOHANG)
            except os.error:
                pid = None

            if pid:
                self.children.remove(pid)
            else:
                break
tcp_forward.py
# -*- coding: utf-8 -*-
# 2020/7/10
# create by: snower

import time
import struct
import random
import traceback
import logging
import argparse
from collections import deque
import threading
import signal
import socket
import sevent

# Unit suffix -> multiplier for parsing speed limits like "1M".
BYTES_MAP = {"B": 1, "K": 1024, "M": 1024*1024, "G": 1024*1024*1024, "T": 1024*1024*1024*1024}


def config_signal():
    """Stop the sevent loop cleanly on SIGINT/SIGTERM."""
    signal.signal(signal.SIGINT, lambda signum, frame: sevent.current().stop())
    signal.signal(signal.SIGTERM, lambda signum, frame: sevent.current().stop())


def format_data_len(date_len):
    """Format a byte count as a human-readable string (B/K/M/G/T)."""
    if date_len < 1024:
        return "%dB" % date_len
    elif date_len < 1024*1024:
        return "%.3fK" % (date_len/1024.0)
    elif date_len < 1024*1024*1024:
        return "%.3fM" % (date_len/(1024.0*1024.0))
    elif date_len < 1024*1024*1024*1024:
        return "%.3fG" % (date_len/(1024.0*1024.0*1024.0))
    return "%.3fT" % (date_len/(1024.0*1024.0*1024.0*1024.0))


def host_parse(host):
    """Parse a forward spec "[bind:]port:host:port[|subnet[/bits]]".

    Returns ([(bind_host, bind_port), (remote_host, remote_port)], subnet)
    where subnet is a packed-int network/mask pair (IPv4, or a 64-bit
    tuple pair for IPv6). Raises ValueError on an unparseable spec.
    """
    hosts, subnet, cs = [], ["", 0], []
    is_brackets, is_subnet = False, False
    # Character scan: ':' splits fields (unless inside [..] brackets or
    # in the subnet part), '|' starts the subnet, '/' its prefix length.
    for c in host:
        if c == ":":
            if not is_brackets and not is_subnet:
                hosts.append("".join(cs))
                cs = []
            else:
                cs.append(c)
        elif c == "[":
            is_brackets = True
        elif c == "]":
            is_brackets = False
        elif c == "|":
            hosts.append("".join(cs))
            cs = []
            is_subnet = True
        elif c == "/":
            subnet[0] = "".join(cs)
            cs = []
        else:
            cs.append(c)
    if is_subnet:
        if subnet[0]:
            subnet[1] = int("".join(cs) if cs else "32")
        else:
            subnet = ["".join(cs), 32]
    else:
        hosts.append("".join(cs))
        subnet = ["0.0.0.0", 0]

    # Fill in defaults depending on how many fields were given.
    if len(hosts) == 2:
        if hosts[0].isdigit() and hosts[1].isdigit():
            hosts = [("0.0.0.0", int(hosts[0])), ("127.0.0.1", int(hosts[1]))]
        elif hosts[0] == "":
            hosts = [("0.0.0.0", 0), ("127.0.0.1", int(hosts[1]))]
        else:
            # NOTE(review): this branch builds a 3-element list, not two
            # (host, port) pairs — looks like a latent bug; confirm intent.
            hosts = [("0.0.0.0", 0), (hosts[0]), int(hosts[1])]
    elif len(hosts) == 3:
        if hosts[0].isdigit():
            hosts = [("0.0.0.0", int(hosts[0])), (hosts[1], int(hosts[2]))]
        elif hosts[0] == "":
            hosts = [("0.0.0.0", 0), (hosts[1], int(hosts[2]))]
        elif hosts[2].isdigit():
            hosts = [(hosts[0], int(hosts[1])), ("127.0.0.1", int(hosts[2]))]
        else:
            hosts = [(hosts[0], int(hosts[1])), ("127.0.0.1", 0)]
    elif len(hosts) == 4:
        hosts = [(hosts[0], int(hosts[1])), (hosts[2], int(hosts[3]))]
    else:
        raise ValueError(u"host error %s", host)

    # Pack the subnet: try IPv4 first, fall back to IPv6 (two 64-bit halves).
    try:
        subnet[0] = struct.unpack("!I", socket.inet_pton(socket.AF_INET, subnet[0]))[0]
        subnet[1] = ~ (0xffffffff >> subnet[1])
    except:
        subnet[0] = (struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, subnet[0])))
        subnet[1] = (~ (0xffffffffffffffff >> min(subnet[1], 64)), ~ (0xffffffffffffffff >> max(subnet[1] - 64, 0)))
    return hosts, subnet


def is_subnet(ip, subnet):
    """Return True if `ip` (string) falls inside a packed subnet from host_parse."""
    try:
        ip = struct.unpack("!I", socket.inet_pton(socket.AF_INET, ip))[0]
        if isinstance(subnet[0], tuple) or isinstance(subnet[1], tuple):
            return False
        return (ip & subnet[1]) == (subnet[0] & subnet[1])
    except:
        # IPv4 parse failed: treat as IPv6 and compare both 64-bit halves.
        ip = (struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip)))
        if not isinstance(subnet[0], tuple) or len(subnet[0]) != 2 or not isinstance(subnet[1], tuple) or len(subnet[1]) != 2:
            return False
        return ((ip[0] & subnet[1][0]) == (subnet[0][0] & subnet[1][0])) and ((ip[1] & subnet[1][1]) == (subnet[0][1] & subnet[1][1]))


def warp_write(conn, status, key):
    """Wrap conn.write so each write adds its length to status[key]."""
    origin_write = conn.write
    def _(data):
        status[key] += len(data)
        return origin_write(data)
    return _


def warp_speed_limit_write(conn, status, key):
    """Wrap conn.write with per-connection (and optional global) rate limiting.

    Excess data is parked in a buffer and drained by the shared
    SpeedLimiter loop via speed_write(); conn.end is deferred until the
    buffered data is flushed.
    """
    conn_id = id(conn)
    origin_write = conn.write
    origin_end = conn.end
    speed_limiter = status["speed_limiter"]
    current_speed_key = "recv_current_speed" if key == "recv_len" else "send_current_speed"
    end_key = "recv_is_end" if key == "recv_len" else "send_is_end"
    status[end_key] = False
    buffer = sevent.buffer.Buffer()

    def warp_end():
        # Defer the real end() while throttled data is still queued.
        if conn_id not in speed_limiter.buffers:
            return origin_end()
        status[end_key] = True
    conn.end = warp_end

    def speed_write(data, speed):
        # Callback invoked by the SpeedLimiter loop with this tick's quota.
        dl = len(data)
        if dl > speed:
            if speed <= 0:
                if speed_limiter.current_speed <= 0:
                    return
                speed = min(speed_limiter.current_speed, speed_limiter.speed)
                if dl > speed:
                    status[key] += buffer.fetch(data, speed)
                    speed_limiter.current_speed -= speed
                else:
                    status[key] += dl
                    speed_limiter.current_speed -= dl
            else:
                status[key] += buffer.fetch(data, speed)
            try:
                return origin_write(buffer)
            except sevent.tcp.SocketClosed:
                speed_limiter.buffers.pop(conn_id, None)
                if status[end_key]:
                    status[end_key] = False
                    sevent.current().add_async(origin_end)
                return False
        if not data:
            # Fully drained: unregister and run any deferred end().
            speed_limiter.buffers.pop(conn_id, None)
            if status[end_key]:
                status[end_key] = False
                sevent.current().add_async(origin_end)
            return True
        status[key] += dl
        try:
            return origin_write(data)
        except sevent.tcp.SocketClosed:
            return False

    def _(data):
        if conn_id in speed_limiter.buffers:
            # Already throttled: spend what remains of this tick's quota.
            current_speed = status[current_speed_key]
            if current_speed <= 0:
                return
            if speed_limiter.global_speed:
                if speed_limiter.current_speed <= 0:
                    return
                dl = len(data)
                speed = min(speed_limiter.current_speed, current_speed)
                if dl > speed:
                    status[key] += buffer.fetch(data, speed)
                    speed_limiter.current_speed -= speed
                    status[current_speed_key] -= speed
                    return origin_write(buffer)
                status[key] += dl
                speed_limiter.current_speed -= dl
                status[current_speed_key] -= dl
                return origin_write(data)
            dl = len(data)
            if dl > current_speed:
                status[key] += buffer.fetch(data, current_speed)
                status[current_speed_key] = 0
                return origin_write(buffer)
            status[key] += dl
            status[current_speed_key] -= dl
            return origin_write(data)

        # First throttled write: register with the limiter and start its loop.
        speed_limiter.buffers[conn_id] = (data, speed_write)
        if not speed_limiter.is_running:
            speed_limiter.is_running = True
            sevent.current().call_async(speed_limiter.loop)
        dl = len(data)
        if speed_limiter.global_speed:
            if speed_limiter.current_speed <= 0:
                status[current_speed_key] = 0
                return
            speed = min(speed_limiter.current_speed, speed_limiter.speed)
            if dl > speed:
                status[key] += buffer.fetch(data, speed)
                speed_limiter.current_speed -= speed
                status[current_speed_key] = 0
                return origin_write(buffer)
            status[key] += dl
            speed_limiter.current_speed -= dl
            status[current_speed_key] = speed_limiter.speed - dl
            return origin_write(data)
        if dl > speed_limiter.speed:
            status[key] += buffer.fetch(data, speed_limiter.speed)
            status[current_speed_key] = 0
            return origin_write(buffer)
        status[key] += dl
        status[current_speed_key] = speed_limiter.speed - dl
        return origin_write(data)
    return _


def warp_delay_write(delayer, warp_write_func):
    """Decorate a write-wrapper factory so writes are queued on `delayer`.

    Each write is held for a fixed or random delay (microsecond-scaled)
    before being released through the wrapped writer.
    """
    def _(conn, status, key):
        origin_write = warp_write_func(conn, status, key)
        buffer = sevent.buffer.Buffer()
        key = "delay_%s_buffer_len" % id(buffer)
        status[key] = 0

        def delay_write(data, data_len):
            status[key] -= buffer.fetch(data, data_len)
            try:
                return origin_write(buffer)
            except sevent.tcp.SocketClosed:
                return False

        def __(data):
            data_len = max(len(data) - status[key], 0)
            if delayer.delay:
                delayer.queues.append((time.time() + delayer.delay, data, data_len, delay_write))
            else:
                # Random delay range, given in microseconds.
                delayer.queues.append((time.time() + random.randint(*delayer.rdelays) / 1000000.0, data, data_len, delay_write))
            status[key] += data_len
            if delayer.is_running:
                return
            delayer.is_running = True
            sevent.current().call_async(delayer.loop)
        return __
    return _


def warp_mirror_write(mirror_host, mirror_header, warp_write_func):
    """Decorate a write-wrapper factory to tee traffic to a mirror host.

    Upstream and downstream bytes are copied to the configured mirror
    addresses; an optional header (with {var} placeholders) is sent first.
    Mirror failures are swallowed so they never disturb the real stream.
    """
    try:
        mirror_host, subnet = host_parse(mirror_host)
        up_address, down_address = mirror_host[0], mirror_host[1]
        if up_address[0] == "0.0.0.0":
            up_address = ("127.0.0.1", up_address[1])
        if down_address[0] == "0.0.0.0":
            down_address = ("127.0.0.1", down_address[1])
    except ValueError:
        logging.error("mirror address error %s", mirror_host)
        return warp_write_func
    # Convert "{name}" placeholders to %-style for dict formatting.
    mirror_header = mirror_header.replace("{", "%(").replace("}", ")s")

    def warp_up_conn_write(conn, status, key):
        origin_write = conn.write
        up_buffer = sevent.buffer.Buffer()

        def up_conn_write(data):
            try:
                if "mirror_up_conn" not in status:
                    # Reuse the down-mirror socket when both point at the
                    # same address; otherwise open a fresh one lazily.
                    if up_address == down_address and status.get("mirror_down_conn"):
                        status['mirror_up_conn'] = status.get("mirror_down_conn")
                    else:
                        mirror_conn = sevent.tcp.Socket()
                        mirror_conn.enable_nodelay()
                        mirror_conn.connect(up_address, 5)
                        mirror_conn.on_connect(lambda s: mirror_conn.end() if conn._state == sevent.tcp.STATE_CLOSED else None)
                        mirror_conn.on_data(lambda s, b: b.read())
                        conn.on_close(lambda s: mirror_conn.end() if mirror_conn._state != sevent.tcp.STATE_CONNECTING else None)
                        status['mirror_up_conn'] = mirror_conn
                        try:
                            if mirror_header:
                                up_buffer.write((mirror_header % status["mirror_variables"]).encode("utf-8"))
                        except:
                            pass
                        logging.info("mirror up copy to %s:%s", up_address[0], up_address[1])
                up_buffer.copyfrom(data)
                status['mirror_up_conn'].write(up_buffer)
            except:
                pass
            return origin_write(data)
        return up_conn_write

    def warp_down_conn_write(conn, status, key):
        origin_write = conn.write
        down_buffer = sevent.buffer.Buffer()

        def down_conn_write(data):
            try:
                if "mirror_down_conn" not in status:
                    if up_address == down_address and status.get("mirror_up_conn"):
                        status['mirror_down_conn'] = status.get("mirror_up_conn")
                    else:
                        mirror_conn = sevent.tcp.Socket()
                        mirror_conn.enable_nodelay()
                        mirror_conn.connect(down_address, 5)
                        mirror_conn.on_connect(lambda s: mirror_conn.end() if conn._state == sevent.tcp.STATE_CLOSED else None)
                        mirror_conn.on_data(lambda s, b: b.read())
                        conn.on_close(lambda s: mirror_conn.end() if mirror_conn._state != sevent.tcp.STATE_CONNECTING else None)
                        status['mirror_down_conn'] = mirror_conn
                        try:
                            if mirror_header:
                                down_buffer.write((mirror_header % status["mirror_variables"]).encode("utf-8"))
                        except Exception as e:
                            pass
                        logging.info("mirror down copy to %s:%s", down_address[0], down_address[1])
                down_buffer.copyfrom(data)
                status['mirror_down_conn'].write(down_buffer)
            except:
                pass
            return origin_write(data)
        return down_conn_write

    def _(conn, status, key):
        if mirror_header and "mirror_variables" not in status:
            status["mirror_variables"] = {"conn_id": "", "from_host": "", "from_port": 0, "to_host": "", "to_port": 0}
        if key == "send_len":
            if "mirror_subnet" not in status or status["mirror_subnet"]:
                if up_address:
                    conn.write = warp_up_conn_write(conn, status, key)
                    if mirror_header:
                        status["mirror_variables"]["to_host"] = conn.address[0]
                        status["mirror_variables"]["to_port"] = conn.address[1]
            else:
                status.pop("mirror_variables", None)
        if key == "recv_len":
            # Only mirror connections whose source matches the mirror subnet.
            if subnet and not is_subnet(conn.address[0], subnet):
                status["mirror_subnet"] = False
            if "mirror_subnet" not in status or status["mirror_subnet"]:
                if down_address:
                    conn.write = warp_down_conn_write(conn, status, key)
                    if mirror_header:
                        status["mirror_variables"]["conn_id"] = id(conn)
                        status["mirror_variables"]["from_host"] = conn.address[0]
                        status["mirror_variables"]["from_port"] = conn.address[1]
            else:
                status.pop("mirror_variables", None)
        return warp_write_func(conn, status, key)
    return _


async def tcp_forward(conns, conn, forward_address, subnet, status):
    """Pipe one accepted connection to its forward address until either side closes."""
    start_time = time.time()
    conn.write, pconn = warp_write(conn, status, "recv_len"), None
    try:
        conn.enable_nodelay()
        pconn = sevent.tcp.Socket()
        pconn.enable_nodelay()
        await pconn.connectof(forward_address)
        pconn.write = warp_write(pconn, status, "send_len")
        logging.info("tcp forward connected %s:%d -> %s:%d", conn.address[0], conn.address[1], forward_address[0], forward_address[1])
        # linkof pipes both directions until one side closes.
        await pconn.linkof(conn)
    except sevent.errors.SocketClosed:
        pass
    except Exception as e:
        logging.info("tcp forward error %s:%d -> %s:%d %s %.2fms\r%s", conn.address[0], conn.address[1], forward_address[0],
                     forward_address[1], e, (time.time() - start_time) * 1000, traceback.format_exc())
        return
    finally:
        conn.close()
        if pconn:
            pconn.close()
        conns.pop(id(conn), None)
        logging.info("tcp forward closed %s:%d -> %s:%d %s %s %.2fms", conn.address[0], conn.address[1], forward_address[0], forward_address[1],
                     format_data_len(status["send_len"]), format_data_len(status["recv_len"]), (time.time() - start_time) * 1000)


async def tcp_forward_server(conns, server, forward_hosts, speed_limiter):
    """Accept loop for one listener: match source IP to a subnet rule and spawn a forward."""
    while True:
        conn = await server.accept()
        conn_ip, forward_address, subnet = conn.address[0], None, None
        for a, s in forward_hosts:
            if is_subnet(conn_ip, s):
                forward_address, subnet = a, s
                break
        if not forward_address:
            conn.close()
            # NOTE(review): `return` ends the whole accept loop on a single
            # subnet mismatch — `continue` may have been intended; confirm.
            logging.info("tcp forward subnet fail %s:%d", conn.address[0], conn.address[1])
            return
        status = {"recv_len": 0, "send_len": 0, "last_time": time.time(),
                  "check_recv_len": 0, "check_send_len": 0, "speed_limiter": speed_limiter}
        sevent.current().call_async(tcp_forward, conns, conn, forward_address, subnet, status)
        conns[id(conn)] = (conn, status)


async def check_timeout(conns, timeout):
    """Close connections idle (no byte-count change) for `timeout` seconds.

    The check runs in a daemon thread; closes are marshalled back onto
    the event loop via add_async_safe. Awaits forever to keep the loop alive.
    """
    def run_check():
        while True:
            try:
                now = time.time()
                for conn_id, (conn, status) in list(conns.items()):
                    if status['check_recv_len'] != status['recv_len'] or status['check_send_len'] != status['send_len']:
                        status["check_recv_len"] = status["recv_len"]
                        status["check_send_len"] = status["send_len"]
                        status['last_time'] = now
                        continue
                    if now - status['last_time'] >= timeout:
                        sevent.current().add_async_safe(conn.close)
                        conns.pop(conn_id, None)
            finally:
                time.sleep(min(float(timeout) / 2.0, 30))

    if timeout > 0:
        check_thread = threading.Thread(target=run_check)
        check_thread.setDaemon(True)
        check_thread.start()
    await sevent.Future()


class Delayer(object):
    """Queue of (due_time, data, len, callback) writes released by an async loop."""

    def __init__(self, delay, rdelays):
        # delay: fixed delay in seconds (0 means use rdelays range).
        self.delay = delay
        self.rdelays = rdelays
        self.is_running = False
        self.queues = deque()

    async def loop(self):
        try:
            while self.queues:
                now = time.time()
                timeout_time, data, data_len, callback = self.queues.popleft()
                if timeout_time > now:
                    await sevent.current().sleep(timeout_time - now)
                sevent.current().add_async(callback, data, data_len)
        finally:
            self.is_running = False


class SpeedLimiter(object):
    """Dispatches per-tick byte quotas to throttled connections every ~0.2s.

    `speed` is the per-connection cap, `global_speed` the shared cap; the
    active loop implementation is chosen at construction time.
    """

    def __init__(self, speed, global_speed):
        self.speed = int(speed) or int(global_speed)
        self.global_speed = int(global_speed)
        self.current_speed = self.global_speed if self.global_speed else 0
        self.buffers = {}
        self.is_running = False
        self.loop = self.loop_global_speed if self.global_speed else self.loop_speed

    async def loop_speed(self):
        """Per-connection limiting: every tick each buffer gets `speed` bytes."""
        try:
            current_timestamp = time.time()
            await sevent.current().sleep(0.1)
            while self.buffers:
                try:
                    for _, (data, callback) in list(self.buffers.items()):
                        sevent.current().add_async(callback, data, self.speed)
                finally:
                    now = time.time()
                    sleep_time = 0.2 - (now - current_timestamp)
                    current_timestamp = now
                    await sevent.current().sleep(sleep_time)
        finally:
            self.is_running = False

    async def loop_global_speed(self):
        """Global limiting: split global quota fairly, redistributing slack."""
        try:
            current_timestamp = time.time()
            await sevent.current().sleep(0.1)
            while self.buffers:
                try:
                    avg_speed = int(self.global_speed / len(self.buffers))
                    max_speed, over_speed = min(avg_speed, self.speed), 0
                    speed_buffers = []
                    # First pass: assign each buffer its share, accumulating
                    # unused quota in over_speed.
                    for _, (data, callback) in self.buffers.items():
                        dl = len(data)
                        if max_speed > dl:
                            speed_buffers.append((data, dl, dl, callback))
                            over_speed += avg_speed - dl
                        else:
                            speed_buffers.append((data, dl, max_speed, callback))
                            over_speed += avg_speed - max_speed
                    # Second pass: hand leftover quota to still-hungry buffers.
                    for data, dl, speed, callback in speed_buffers:
                        if over_speed > 0 and dl > speed and speed < self.speed:
                            can_speed = min(min(dl, self.speed) - speed, over_speed)
                            speed += can_speed
                            over_speed -= can_speed
                        sevent.current().add_async(callback, data, speed)
                    self.current_speed = over_speed
                finally:
                    now = time.time()
                    sleep_time = 0.2 - (now - current_timestamp)
                    current_timestamp = now
                    await sevent.current().sleep(sleep_time)
        finally:
            self.current_speed = self.global_speed
            self.is_running = False


async def tcp_forward_servers(servers, timeout, speed, global_speed):
    """Start one accept loop per listener and the shared idle-timeout checker."""
    conns, speed_limiter = {}, (SpeedLimiter(speed, global_speed) if speed or global_speed else None)
    for server, forward_hosts in servers:
        sevent.current().call_async(tcp_forward_server, conns, server, forward_hosts, speed_limiter)
    await check_timeout(conns, timeout)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="tcp port forward")
    parser.add_argument('-L', dest='forwards', default=[], action="append", type=str,
                        help='forward host, accept format [[local_bind:]local_port:remote_host:remote_port]|[subnet], support muiti forward args (example: 0.0.0.0:80:127.0.0.1:8088 or 80:192.168.0.2:8088|192.168.0.0/24)')
    parser.add_argument('-t', dest='timeout', default=7200, type=int,
                        help='no read/write timeout (default: 7200)')
    parser.add_argument('-s', dest='speed', default=0,
                        type=lambda v: int(float(v[:-1]) * BYTES_MAP[v.upper()[-1]]) \
                            if v and v.upper()[-1] in BYTES_MAP else int(float(v)),
                        help='per connection speed limit byte, example: 1024 or 1M (default: 0 is unlimit), available units : B K M G T')
    parser.add_argument('-S', dest='global_speed', default=0,
                        type=lambda v: int(float(v[:-1]) * BYTES_MAP[v.upper()[-1]]) \
                            if v and v.upper()[-1] in BYTES_MAP else int(float(v)),
                        help='global speed limit byte, example: 1024 or 1M (default: 0 is unlimit), available units : B K M G T')
    parser.add_argument('-d', dest='delay', default=0,
                        type=lambda v: (float(v.split("-")[0]), float(v.split("-")[-1])) \
                            if v and isinstance(v, str) and "-" in v else float(v),
                        help='delay millisecond (default: 0 is not delay, example -d100 or -d100-200), the - between two numbers will be random delay')
    parser.add_argument('-M', dest='mirror_host', default="", type=str,
                        help='mirror host, accept format [[up_host:]up_port:[down_host]:down_port] (example: 0.0.0.0:80:127.0.0.1:8088 or :127.0.0.1:8088 or 127.0.0.1:8088: or 8088:8088)')
    parser.add_argument('-F', dest='mirror_header', default="", type=str,
                        help='mirror header, accept variables [from_host|from_port|to_host|to_port|conn_id] (example: "{conn_id}-{from_host}:{from_port}->{to_host}:{to_port}\\r\\n")')
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)1.1s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    config_signal()
    if not args.forwards:
        exit(0)

    # Group forward rules by bind address.
    forwards = {}
    for forward in args.forwards:
        hosts, subnet = host_parse(forward)
        if hosts[0] not in forwards:
            forwards[hosts[0]] = []
        forwards[hosts[0]].append((hosts[1], subnet))
    if not forwards:
        exit(0)

    # Stack the write wrappers: speed limit, then delay, then mirror.
    if args.speed or args.global_speed:
        warp_write = warp_speed_limit_write
    if args.delay:
        warp_write = warp_delay_write(Delayer(0 if isinstance(args.delay, tuple) else float(args.delay) / 1000.0,
                                              (int(args.delay[0] * 1000), int(args.delay[1] * 1000)) if isinstance(args.delay, tuple) else None),
                                      warp_write)
    if args.mirror_host:
        warp_write = warp_mirror_write(args.mirror_host, args.mirror_header, warp_write)

    forward_servers = []
    for bind_address, forward_hosts in forwards.items():
        server = sevent.tcp.Server()
        server.enable_reuseaddr()
        server.listen(bind_address)
        # Sort rules most-specific subnet first.
        forward_servers.append((server, sorted(forward_hosts, key=lambda x: x[1][1][0] * 0xffffffffffffffff + x[1][1][1]
                                               if isinstance(x[1][1], tuple) else x[1][1], reverse=True)))
        logging.info("port forward listen %s:%s", bind_address[0], bind_address[1])

    try:
        # Speeds are divided by 10 because the limiter ticks ~10x per second.
        sevent.run(tcp_forward_servers, forward_servers, args.timeout, int(args.speed / 10), int(args.global_speed / 10))
    except KeyboardInterrupt:
        for server, _ in forward_servers:
            try:
                server.close()
            except:
                pass
        exit(0)
scheduler.py
# coding=utf-8 """Module that provides a cron-like task scheduler. This task scheduler is designed to be used from inside your own program. You can schedule Python functions to be called at specific intervals or days. It uses the standard 'sched' module for the actual task scheduling, but provides much more: * repeated tasks (at intervals, or on specific days) * error handling (exceptions in tasks don't kill the scheduler) * optional to run scheduler in its own thread or separate process * optional to run a task in its own thread or separate process If the threading module is available, you can use the various Threaded variants of the scheduler and associated tasks. If threading is not available, you could still use the forked variants. If fork is also not available, all processing is done in a single process, sequentially. There are three Scheduler classes: Scheduler ThreadedScheduler ForkedScheduler You usually add new tasks to a scheduler using the add_interval_task or add_daytime_task methods, with the appropriate processmethod argument to select sequential, threaded or forked processing. NOTE: it is impossible to add new tasks to a ForkedScheduler, after the scheduler has been started! For more control you can use one of the following Task classes and use schedule_task or schedule_task_abs: IntervalTask ThreadedIntervalTask ForkedIntervalTask SingleTask ThreadedSingleTask ForkedSingleTask WeekdayTask ThreadedWeekdayTask ForkedWeekdayTask MonthdayTask ThreadedMonthdayTask ForkedMonthdayTask Kronos is the Greek God of Time. Kronos scheduler (c) Irmen de Jong. This version has been extracted from the Turbogears source repository and slightly changed to be completely stand-alone again. Also some fixes have been made to make it work on Python 2.6 (sched module changes). The version in Turbogears is based on the original stand-alone Kronos. 
This is open-source software, released under the MIT Software License: http://www.opensource.org/licenses/mit-license.php """ __version__ = "2.0" __all__ = [ "DayTaskRescheduler", "ForkedIntervalTask", "ForkedMonthdayTask", "ForkedScheduler", "ForkedSingleTask", "ForkedTaskMixin", "ForkedWeekdayTask", "IntervalTask", "MonthdayTask", "Scheduler", "SingleTask", "Task", "ThreadedIntervalTask", "ThreadedMonthdayTask", "ThreadedScheduler", "ThreadedSingleTask", "ThreadedTaskMixin", "ThreadedWeekdayTask", "WeekdayTask", ] import os import sys import sched import time import logging import traceback import weakref class method: sequential = "sequential" forked = "forked" threaded = "threaded" class Scheduler: """The Scheduler itself.""" def __init__(self): self.running = True self.log = logging.getLogger('diamond') self.sched = sched.scheduler(time.time, self.__delayfunc) def __delayfunc(self, delay): # This delay function is basically a time.sleep() that is # divided up, so that we can check the self.running flag while # delaying. There is an additional check in here to ensure that the # top item of the queue hasn't changed if delay < 10: time.sleep(delay) else: toptime = self._getqueuetoptime() endtime = time.time() + delay period = 5 stoptime = endtime - period while (self.running and stoptime > time.time() and self._getqueuetoptime() == toptime): time.sleep(period) if not self.running or self._getqueuetoptime() != toptime: return now = time.time() if endtime > now: time.sleep(endtime - now) def _acquire_lock(self): pass def _release_lock(self): pass def add_interval_task(self, action, taskname, initialdelay, interval, processmethod, args, kw, abs=False): """Add a new Interval Task to the schedule. A very short initialdelay or one of zero cannot be honored, you will see a slight delay before the task is first executed. This is because the scheduler needs to pick it up in its loop. 
""" if initialdelay < 0 or interval < 1: raise ValueError("Delay or interval must be >0") # Select the correct IntervalTask class. # Not all types may be available! if processmethod == method.sequential: TaskClass = IntervalTask elif processmethod == method.threaded: TaskClass = ThreadedIntervalTask elif processmethod == method.forked: TaskClass = ForkedIntervalTask else: raise ValueError("Invalid processmethod") if not args: args = [] if not kw: kw = {} task = TaskClass(taskname, interval, action, args, kw, abs) self.schedule_task(task, initialdelay) return task def add_single_task(self, action, taskname, initialdelay, processmethod, args, kw): """Add a new task to the scheduler that will only be executed once.""" if initialdelay < 0: raise ValueError("Delay must be >0") # Select the correct SingleTask class. Not all types may be available! if processmethod == method.sequential: TaskClass = SingleTask elif processmethod == method.threaded: TaskClass = ThreadedSingleTask elif processmethod == method.forked: TaskClass = ForkedSingleTask else: raise ValueError("Invalid processmethod") if not args: args = [] if not kw: kw = {} task = TaskClass(taskname, action, args, kw) self.schedule_task(task, initialdelay) return task def add_daytime_task(self, action, taskname, weekdays, monthdays, timeonday, processmethod, args, kw): """Add a new Day Task (Weekday or Monthday) to the schedule.""" if weekdays and monthdays: raise ValueError("You can only specify weekdays or monthdays, " "not both") if not args: args = [] if not kw: kw = {} if weekdays: # Select the correct WeekdayTask class. # Not all types may be available! 
if processmethod == method.sequential: TaskClass = WeekdayTask elif processmethod == method.threaded: TaskClass = ThreadedWeekdayTask elif processmethod == method.forked: TaskClass = ForkedWeekdayTask else: raise ValueError("Invalid processmethod") task = TaskClass(taskname, weekdays, timeonday, action, args, kw) if monthdays: # Select the correct MonthdayTask class. # Not all types may be available! if processmethod == method.sequential: TaskClass = MonthdayTask elif processmethod == method.threaded: TaskClass = ThreadedMonthdayTask elif processmethod == method.forked: TaskClass = ForkedMonthdayTask else: raise ValueError("Invalid processmethod") task = TaskClass(taskname, monthdays, timeonday, action, args, kw) firsttime = task.get_schedule_time(True) self.schedule_task_abs(task, firsttime) return task def schedule_task(self, task, delay): """Add a new task to the scheduler with the given delay (seconds). Low-level method for internal use. """ if self.running: # lock the sched queue, if needed self._acquire_lock() try: task.event = self.sched.enter(delay, 0, task, (weakref.ref(self),)) finally: self._release_lock() else: task.event = self.sched.enter(delay, 0, task, (weakref.ref(self),)) def schedule_task_abs(self, task, abstime): """Add a new task to the scheduler for the given absolute time value. Low-level method for internal use. 
""" if self.running: # lock the sched queue, if needed self._acquire_lock() try: task.event = self.sched.enterabs(abstime, 0, task, (weakref.ref(self),)) finally: self._release_lock() else: task.event = self.sched.enterabs(abstime, 0, task, (weakref.ref(self),)) def start(self): """Start the scheduler.""" self._run() def stop(self): """Remove all pending tasks and stop the Scheduler.""" self.running = False self._clearschedqueue() def cancel(self, task): """Cancel given scheduled task.""" self.sched.cancel(task.event) if sys.version_info >= (2, 6): # code for sched module of python 2.6+ def _getqueuetoptime(self): return self.sched._queue[0].time def _clearschedqueue(self): self.sched._queue[:] = [] else: # code for sched module of python 2.5 and older def _getqueuetoptime(self): return self.sched.queue[0][0] def _clearschedqueue(self): self.sched.queue[:] = [] def _run(self): # Low-level run method to do the actual scheduling loop. while self.running: try: self.sched.run() except Exception, x: self.log.error("ERROR DURING SCHEDULER EXECUTION %s \n %s", x, "".join(traceback.format_exception(*sys.exc_info()))) # queue is empty; sleep a short while before checking again if self.running: time.sleep(5) class Task: """Abstract base class of all scheduler tasks""" def __init__(self, name, action, args, kw): """This is an abstract class!""" self.name = name self.action = action self.args = args self.kw = kw self.log = logging.getLogger('diamond') def __call__(self, schedulerref): """Execute the task action in the scheduler's thread.""" try: self.execute() except Exception, x: self.handle_exception(x) self.reschedule(schedulerref()) def reschedule(self, scheduler): """This method should be defined in one of the sub classes!""" raise NotImplementedError("You're using the abstract class 'Task'," " use a concrete class instead") def execute(self): """Execute the actual task.""" self.action(*self.args, **self.kw) def handle_exception(self, exc): """Handle any exception that 
occured during task execution.""" self.log.error("ERROR DURING TASK EXECUTION %s \n %s", exc, "".join(traceback.format_exception(*sys.exc_info()))) class SingleTask(Task): """A task that only runs once.""" def reschedule(self, scheduler): pass class IntervalTask(Task): """A repeated task that occurs at certain intervals (in seconds).""" def __init__(self, name, interval, action, args=None, kw=None, abs=False): Task.__init__(self, name, action, args, kw) self.absolute = abs self.interval = interval self.duration = 0 def execute(self): """ Execute the actual task.""" start_time = time.time() self.action(*self.args, **self.kw) end_time = time.time() self.duration = int(end_time - start_time) def reschedule(self, scheduler): """Reschedule this task according to its interval (in seconds).""" if self.absolute and self.duration: if self.duration < self.interval: scheduler.schedule_task(self, self.interval - self.duration) else: scheduler.schedule_task(self, 0) else: scheduler.schedule_task(self, self.interval) class DayTaskRescheduler: """A mixin class that contains the reschedule logic for the DayTasks.""" def __init__(self, timeonday): self.timeonday = timeonday def get_schedule_time(self, today): """Calculate the time value at which this task is to be scheduled.""" now = list(time.localtime()) if today: # schedule for today. let's see if that is still possible if (now[3], now[4]) >= self.timeonday: # too bad, it will be tomorrow now[2] += 1 else: # tomorrow now[2] += 1 # set new time on day (hour,minute) now[3], now[4] = self.timeonday # seconds now[5] = 0 return time.mktime(now) def reschedule(self, scheduler): """Reschedule this task according to the daytime for the task. The task is scheduled for tomorrow, for the given daytime. """ # (The execute method in the concrete Task classes will check # if the current day is a day on which the task must run). 
abstime = self.get_schedule_time(False) scheduler.schedule_task_abs(self, abstime) class WeekdayTask(DayTaskRescheduler, Task): """A task that is called at specific days in a week (1-7), at a fixed time on the day. """ def __init__(self, name, weekdays, timeonday, action, args=None, kw=None): if type(timeonday) not in (list, tuple) or len(timeonday) != 2: raise TypeError("timeonday must be a 2-tuple (hour,minute)") if type(weekdays) not in (list, tuple): raise TypeError("weekdays must be a sequence of weekday numbers " "1-7 (1 is Monday)") DayTaskRescheduler.__init__(self, timeonday) Task.__init__(self, name, action, args, kw) self.days = weekdays def execute(self): # This is called every day, at the correct time. We only need to # check if we should run this task today (this day of the week). weekday = time.localtime().tm_wday + 1 if weekday in self.days: self.action(*self.args, **self.kw) class MonthdayTask(DayTaskRescheduler, Task): """A task that is called at specific days in a month (1-31), at a fixed time on the day. """ def __init__(self, name, monthdays, timeonday, action, args=None, kw=None): if type(timeonday) not in (list, tuple) or len(timeonday) != 2: raise TypeError("timeonday must be a 2-tuple (hour,minute)") if type(monthdays) not in (list, tuple): raise TypeError("monthdays must be a sequence of numbers 1-31") DayTaskRescheduler.__init__(self, timeonday) Task.__init__(self, name, action, args, kw) self.days = monthdays def execute(self): # This is called every day, at the correct time. We only need to # check if we should run this task today (this day of the month). 
if time.localtime().tm_mday in self.days: self.action(*self.args, **self.kw) try: import threading class ThreadedScheduler(Scheduler): """A Scheduler that runs in its own thread.""" def __init__(self): Scheduler.__init__(self) # we require a lock around the task queue self._lock = threading.Lock() def start(self): """Splice off a thread in which the scheduler will run.""" self.thread = threading.Thread(target=self._run) self.thread.setDaemon(True) self.thread.start() def stop(self): """Stop the scheduler and wait for the thread to finish.""" Scheduler.stop(self) try: self.thread.join() except AttributeError: pass def _acquire_lock(self): """Lock the thread's task queue.""" self._lock.acquire() def _release_lock(self): """Release the lock on th ethread's task queue.""" self._lock.release() class ThreadedTaskMixin: """A mixin class to make a Task execute in a separate thread.""" def __call__(self, schedulerref): """Execute the task action in its own thread.""" threading.Thread(target=self.threadedcall).start() self.reschedule(schedulerref()) def threadedcall(self): # This method is run within its own thread, so we have to # do the execute() call and exception handling here. 
try: self.execute() except Exception, x: self.handle_exception(x) class ThreadedIntervalTask(ThreadedTaskMixin, IntervalTask): """Interval Task that executes in its own thread.""" pass class ThreadedSingleTask(ThreadedTaskMixin, SingleTask): """Single Task that executes in its own thread.""" pass class ThreadedWeekdayTask(ThreadedTaskMixin, WeekdayTask): """Weekday Task that executes in its own thread.""" pass class ThreadedMonthdayTask(ThreadedTaskMixin, MonthdayTask): """Monthday Task that executes in its own thread.""" pass except ImportError: # threading is not available pass if hasattr(os, "fork"): import signal class ForkedScheduler(Scheduler): """A Scheduler that runs in its own forked process.""" def __del__(self): if hasattr(self, "childpid"): os.kill(self.childpid, signal.SIGKILL) def start(self): """Fork off a new process in which the scheduler will run.""" pid = os.fork() if pid == 0: # we are the child signal.signal(signal.SIGUSR1, self.signalhandler) self._run() os._exit(0) else: # we are the parent self.childpid = pid # can no longer insert in the scheduler queue del self.sched def stop(self): """Stop the scheduler and wait for the process to finish.""" os.kill(self.childpid, signal.SIGUSR1) os.waitpid(self.childpid, 0) def signalhandler(self, sig, stack): Scheduler.stop(self) class ForkedTaskMixin: """A mixin class to make a Task execute in a separate process.""" def __call__(self, schedulerref): """Execute the task action in its own process.""" pid = os.fork() if pid == 0: # we are the child try: self.execute() except Exception, x: self.handle_exception(x) os._exit(0) else: # we are the parent self.reschedule(schedulerref()) class ForkedIntervalTask(ForkedTaskMixin, IntervalTask): """Interval Task that executes in its own process.""" pass class ForkedSingleTask(ForkedTaskMixin, SingleTask): """Single Task that executes in its own process.""" pass class ForkedWeekdayTask(ForkedTaskMixin, WeekdayTask): """Weekday Task that executes in its own 
process.""" pass class ForkedMonthdayTask(ForkedTaskMixin, MonthdayTask): """Monthday Task that executes in its own process.""" pass if __name__ == "__main__": def testaction(arg): print ">>>TASK", arg, "sleeping 3 seconds" time.sleep(3) print "<<<END_TASK", arg s = ThreadedScheduler() s.add_interval_task(testaction, "test action 1", 0, 4, method.threaded, ["task 1"], None) s.start() print "Scheduler started, waiting 15 sec...." time.sleep(15) print "STOP SCHEDULER" s.stop() print "EXITING"
unit.py
"""Unit tests for the Splunk Website Monitoring app.

Exercises the URL/duration input fields, the WebPing modular input (pings,
hashing, authentication, proxying) and the cloud-specific proxy behavior.
Several tests need the companion unit-test web server and/or proxy server;
they are skipped via the decorators below when those are not running.
"""

import unittest
import sys
import os
import re
import time
import shutil
import tempfile
import threading
import errno

sys.path.append(os.path.join("..", "src", "bin"))

from web_ping import URLField, DurationField, WebPing, NTLMAuthenticationValueException
from modular_input import Field, FieldValidationException
from website_monitoring_rest_handler import HostFieldValidator
from website_monitoring_app import requests
from six import StringIO

from unit_test_web_server import UnitTestWithWebServer, skipIfNoServer
from test_proxy_server import get_server as get_proxy_server


def skipIfNoProxyServer(func):
    """Decorator: skip the wrapped test when no test proxy is configured."""
    def _decorator(self, *args, **kwargs):
        if not hasattr(self, 'proxy_address') or self.proxy_address is None:
            self.skipTest("No proxy address defined, proxy based test will not run")
            return
        elif not hasattr(self, 'proxy_port') or self.proxy_port is None:
            self.skipTest("No proxy port defined, proxy based test will not run")
            return
        elif not hasattr(self, 'proxy_type') or self.proxy_type is None:
            self.skipTest("No proxy type defined, proxy based test will not run")
            return
        else:
            return func(self, *args, **kwargs)

    return _decorator


class WebsiteMonitoringAppTest(unittest.TestCase):
    """Base test case that loads proxy configuration (once per run) and,
    when no external proxy is configured, starts an in-process test proxy."""

    DEFAULT_TEST_PROXY_SERVER_PORT = 21080

    warned_about_no_proxyd = False

    # Class-level (shared) proxy state, populated by loadConfig().
    proxyd = None
    proxy_address = None
    proxy_port = None
    proxy_type = None

    config_loaded = False

    def toInt(self, str_int):
        # Convert a string to int, passing None through unchanged.
        if str_int is None:
            return None
        else:
            return int(str_int)

    def loadConfig(self, properties_file=None):
        """Load proxy settings from the environment and local.properties;
        fall back to starting a local proxy daemon in a background thread."""

        # Stop if we already loaded the configuration
        if WebsiteMonitoringAppTest.config_loaded:
            return

        # Load the port from the environment if possible. This might be get overridden by the local.properties file.
        WebsiteMonitoringAppTest.proxy_port = int(os.environ.get("TEST_PROXY_SERVER_PORT", WebsiteMonitoringAppTest.DEFAULT_TEST_PROXY_SERVER_PORT))

        fp = None

        if properties_file is None:
            properties_file = os.path.join("..", "local.properties")

        try:
            fp = open(properties_file)
        except IOError:
            pass

        if fp is not None:
            regex = re.compile("(?P<key>[^=]+)[=](?P<value>.*)")
            settings = {}

            for l in fp.readlines():
                r = regex.search(l)

                if r is not None:
                    d = r.groupdict()
                    settings[d["key"]] = d["value"]

            # Load the parameters from the properties file
            WebsiteMonitoringAppTest.proxy_address = settings.get("value.test.proxy.address", WebsiteMonitoringAppTest.proxy_address)
            WebsiteMonitoringAppTest.proxy_port = self.toInt(settings.get("value.test.proxy.port", WebsiteMonitoringAppTest.proxy_port))
            WebsiteMonitoringAppTest.proxy_type = settings.get("value.test.proxy.type", None)

        # If no proxy was defined, use the internal proxy server for testing
        if WebsiteMonitoringAppTest.proxyd is None and WebsiteMonitoringAppTest.proxy_address is None:

            WebsiteMonitoringAppTest.proxy_address = "127.0.0.1"
            WebsiteMonitoringAppTest.proxy_port = WebsiteMonitoringAppTest.proxy_port
            WebsiteMonitoringAppTest.proxy_type = "http"

            WebsiteMonitoringAppTest.proxyd = get_proxy_server(WebsiteMonitoringAppTest.proxy_port)

            def start_server(proxyd):
                proxyd.serve_forever()

            # Daemon thread so the proxy does not keep the test run alive.
            t = threading.Thread(target=start_server, args = (WebsiteMonitoringAppTest.proxyd,))
            t.daemon = True
            t.start()

        # Note that we loaded the config already so that we don't try it again.
        WebsiteMonitoringAppTest.config_loaded = True

    def setUp(self):
        self.loadConfig()


class TestHostFieldValidator(unittest.TestCase):
    """Tests for the REST handler's hostname validator."""

    def test_underscore_allowed(self):
        # http://lukemurphey.net/issues/1002
        # http://answers.splunk.com/answers/233571/website-monitoring-is-not-working-with-proxy-setup.html
        validator = HostFieldValidator()

        self.assertTrue(validator.is_valid_hostname("my_proxy.localhost.com"))


class TestURLField(unittest.TestCase):
    """Tests for URLField parsing and validation."""

    def test_url_field_valid(self):
        url_field = URLField("test_url_field_valid", "title", "this is a test")

        self.assertEqual(url_field.to_python("http://google.com").geturl(), "http://google.com")
        self.assertEqual(url_field.to_python("http://google.com/with/path").geturl(), "http://google.com/with/path")
        self.assertEqual(url_field.to_python("http://google.com:8080/with/port").geturl(), "http://google.com:8080/with/port")

    def test_url_field_invalid(self):
        url_field = URLField("test_url_field_invalid", "title", "this is a test")

        self.assertRaises(FieldValidationException, lambda: url_field.to_python("hxxp://google.com"))
        self.assertRaises(FieldValidationException, lambda: url_field.to_python("http://"))
        self.assertRaises(FieldValidationException, lambda: url_field.to_python("google.com"))


class TestDurationField(unittest.TestCase):
    """Tests for DurationField conversion of human-readable durations to seconds."""

    def test_duration_valid(self):
        duration_field = DurationField("test_duration_valid", "title", "this is a test")

        self.assertEqual(duration_field.to_python("1m"), 60)
        self.assertEqual(duration_field.to_python("5m"), 300)
        self.assertEqual(duration_field.to_python("5 minute"), 300)
        self.assertEqual(duration_field.to_python("5"), 5)
        self.assertEqual(duration_field.to_python("5h"), 18000)
        self.assertEqual(duration_field.to_python("2d"), 172800)
        self.assertEqual(duration_field.to_python("2w"), 86400 * 7 * 2)

    def test_url_field_invalid(self):
        duration_field = DurationField("test_url_field_invalid", "title", "this is a test")

        self.assertRaises(FieldValidationException, lambda: duration_field.to_python("1 treefrog"))
        self.assertRaises(FieldValidationException, lambda: duration_field.to_python("minute"))


def skipIfNoServer(func):
    """Decorator: skip the wrapped test when the local test web server is down."""
    def _decorator(self, *args, **kwargs):
        if self.httpd is None:
            # Don't run the test if the server is not running
            self.skipTest("The web-server is not running")
        else:
            return func(self, *args, **kwargs)

    return _decorator


class TestWebPing(WebsiteMonitoringAppTest, UnitTestWithWebServer):
    """Tests for WebPing: pinging, hashing, authentication, proxying and output."""

    def setUp(self):
        super(TestWebPing, self).setUp()

        # Scratch directory for checkpoint tests; removed in tearDown.
        self.tmp_dir = tempfile.mkdtemp(prefix="TestWebPing")
        #os.makedirs(self.tmp_dir)

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_cleanup_threads(self):
        # Two worker threads that run until removed from threads_running.
        threads_running = [1, 2]
        max_runs = 10

        def thread_function(n):
            total_time = 0

            while n in threads_running and total_time < max_runs:
                time.sleep(1)
                total_time += 1

        thread_1 = threading.Thread(target=thread_function, args=(1,))
        thread_2 = threading.Thread(target=thread_function, args=(2,))

        threads = {
            '1': thread_1,
            '2': thread_2
        }

        thread_1.start()
        thread_2.start()

        web_ping = WebPing()

        # While both threads are alive, nothing should be cleaned up.
        self.assertEqual(len(threads), 2)
        self.assertEqual(web_ping.cleanup_threads(threads), 0)
        self.assertEqual(len(threads), 2)

        # Stop the first thread and wait for it to complete
        threads_running = [2]
        thread_1.join()

        self.assertEqual(web_ping.cleanup_threads(threads), 1)
        self.assertEqual(len(threads), 1)

        # Stop the second thread and wait for it to complete
        threads_running = []
        thread_2.join()

        self.assertEqual(web_ping.cleanup_threads(threads), 1)
        self.assertEqual(len(threads), 0)

    def test_ping(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)

        self.assertEqual(result.response_code, 200)
        self.assertGreater(result.request_time, 0)

    """
    def test_ping_include_ip(self):
        url_field = URLField("test_ping_include_ip", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)
        self.assertGreater(result.response_ip, '127.0.0.1')
    """

    def test_ping_super_long_url(self):
        # https://answers.splunk.com/answers/488784/why-my-website-monitoring-only-check-1-time.html
        url_field = URLField("test_ping", "title", "this is a test")
        #result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page?s=superloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong"), timeout=3)
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page_superlooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong"), timeout=3)

        self.assertEqual(result.response_code, 200)
        self.assertGreater(result.request_time, 0)

    def test_ping_non_existent_domain(self):
        # https://answers.splunk.com/answers/337070/website-monitoring-app-setup.html#answer-338487
        url_field = URLField("test_ping_non_existent_domain", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://xyz"), timeout=3)

        self.assertEqual(result.response_code, 0)
        self.assertEqual(result.request_time, 0)

    def test_ping_timeout(self):
        # 192.168.30.23 is assumed unroutable so the request times out.
        url_field = URLField("test_ping_timeout", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("https://192.168.30.23/"), timeout=3)

        self.assertEqual(result.timed_out, True)

    def test_ping_with_headers(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, return_headers=True)

        self.assertEqual(result.response_code, 200)
        self.assertGreater(result.request_time, 0)
        self.assertGreater(len(result.headers), 0)
        self.assertEqual(result.headers['Content-type'], 'text/html')

    def test_is_exception_for_timeout(self):
        try:
            r = requests.get('https://192.168.30.23/')
            # website_monitoring_app.requests.packages.urllib3.exceptions.NewConnectionError
        except Exception as e:
            if not WebPing.isExceptionForTimeout(e):
                print(e)

            self.assertTrue(WebPing.isExceptionForTimeout(e))

    def test_save_checkpoint(self):
        web_ping = WebPing()
        web_ping.save_checkpoint(self.tmp_dir, "web_ping://TextCritical.com", 100)
        self.assertEqual(web_ping.last_ran(self.tmp_dir, "web_ping://TextCritical.com"), 100)

    def test_is_expired(self):
        self.assertFalse(WebPing.is_expired(time.time(), 30))
        self.assertTrue(WebPing.is_expired(time.time() - 31, 30))

    def get_test_dir(self):
        return os.path.dirname(os.path.abspath(__file__))

    def test_output_result(self):
        web_ping = WebPing(timeout=3)

        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)

        out = StringIO()

        web_ping.output_result(result, "stanza", "title", unbroken=True, close=True, out=out)

        self.assertTrue(out.getvalue().find("response_code=200") >= 0)

    def test_output_result_unavailable(self):
        web_ping = WebPing(timeout=3)

        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://192.168.30.23/"), timeout=3)

        out = StringIO()

        web_ping.output_result(result, "stanza", "title", unbroken=True, close=True, out=out)

        self.assertTrue(out.getvalue().find("timed_out=True") >= 0)

    @skipIfNoServer
    def test_hash(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3)

        self.assertEqual(result.response_code, 200)

        self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
        self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')

    @skipIfNoServer
    def test_hash_fips(self):
        # In FIPS mode the MD5 hash must NOT be produced; SHA224 still is.
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, fips_mode=True)

        self.assertEqual(result.response_code, 200)

        self.assertNotEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
        self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')

    def test_missing_servername(self):
        """
        Some web-servers require that the "Host" be included on SSL connections when the
        server is hosting multiple domains on the same IP.

        Without the host header, the server is unable to determine which certificate to
        provide and thus closes the connection.

        http://lukemurphey.net/issues/1035
        """

        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("https://lukemurphey.net"), timeout=3)

        self.assertEqual(result.response_code, 200)

    @skipIfNoProxyServer
    def test_ping_over_proxy(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://textcritical.com"), timeout=3, proxy_type=self.proxy_type, proxy_server=self.proxy_address, proxy_port=self.proxy_port)

        self.assertEqual(result.response_code, 200)

    @skipIfNoServer
    def test_ping_with_basic_authentication(self):
        # Try with valid authentication
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)), timeout=3, username="admin", password="changeme")

        self.assertEqual(result.response_code, 200)

        self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
        self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')

        # Verify that bad authentication fails
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)), timeout=3, username="admin", password="wrongpassword")

        self.assertEqual(result.response_code, 401)
        self.assertGreater(result.request_time, 0)

    def test_ping_with_digest_authentication(self):
        # Try with valid authentication
        url_field = URLField( "test_ping", "title", "this is a test")
        result = WebPing.ping( url_field.to_python("http://httpbin.org/digest-auth/auth/user/passwd"), timeout=3, username="user", password="passwd")

        self.assertEqual(result.response_code, 200)

    @skipIfNoServer
    def test_ping_with_ntlm_authentication(self):
        # Try with valid authentication
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"), timeout=3, username="user\\domain", password="passwd", raise_all=True)

        self.assertEqual(result.response_code, 200)

    @skipIfNoServer
    def test_ping_with_ntlm_negotiate_authentication(self):
        # Try with valid authentication
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth_negotiate"), timeout=3, username="user\\domain", password="passwd")

        self.assertEqual(result.response_code, 200)

    def test_ping_with_ntlm_authentication_missing_domain(self):
        # Try with missing domain
        url_field = URLField( "test_ping", "title", "this is a test")

        self.assertRaises(NTLMAuthenticationValueException, lambda: WebPing.ping( url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"), timeout=3, username="user", password="passwd"))

    @skipIfNoServer
    def test_ping_with_basic_authentication_optional(self):
        # Try with valid authentication
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/optional_auth"), timeout=3, username="admin", password="changeme")

        self.assertEqual(result.response_code, 203)

        # Verify that no authentication still works
        result = WebPing.ping( url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/optional_auth"), timeout=3)

        self.assertEqual(result.response_code, 202)
        self.assertGreater(result.request_time, 0)

    def test_https_with_basic_authentication(self):
        # Try with valid authentication
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("https://httpbin.org/basic-auth/foo/bar"), timeout=3, username="foo", password="bar")

        self.assertEqual(result.response_code, 200)

        # Verify that no authentication still works
        result = WebPing.ping(url_field.to_python("https://httpbin.org/basic-auth/foo/bar"), timeout=3, username="foo", password="WRONG_PASSWORD")

        self.assertEqual(result.response_code, 401)
        self.assertGreater(result.request_time, 0)

    @skipIfNoServer
    def test_determine_auth_method_basic(self):
        # Try with basic auth
        url_field = URLField( "test_ping", "title", "this is a test")
        auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port)))

        self.assertEqual(auth_type, WebPing.HTTP_AUTH_BASIC)

    def test_determine_auth_method_digest(self):
        # Try with digest auth
        url_field = URLField( "test_ping", "title", "this is a test")
        auth_type = WebPing.determine_auth_type(url_field.to_python("http://httpbin.org/digest-auth/auth/user/passwd"))

        self.assertEqual(auth_type, WebPing.HTTP_AUTH_DIGEST)

    @skipIfNoServer
    def test_determine_auth_method_ntlm(self):
        # Try with digest auth
        url_field = URLField( "test_ping", "title", "this is a test")
        auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth"))

        self.assertEqual(auth_type, WebPing.HTTP_AUTH_NTLM)

    @skipIfNoServer
    def test_determine_auth_method_ntlm_comma_header(self):
        # Try with digest auth
        url_field = URLField( "test_ping", "title", "this is a test")
        auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/ntlm_auth_negotiate"))

        self.assertEqual(auth_type, WebPing.HTTP_AUTH_NTLM)

    @skipIfNoServer
    def test_determine_auth_method_none(self):
        # Try with digest auth
        url_field = URLField( "test_ping", "title", "this is a test")
        auth_type = WebPing.determine_auth_type(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"))

        self.assertEqual(auth_type, WebPing.HTTP_AUTH_NONE)

    @skipIfNoServer
    def test_custom_user_agent(self):
        """
        http://lukemurphey.net/issues/1341
        """

        url_field = URLField("test_ping", "title", "this is a test")

        # Make sure that the server is validating the user-agent by returning 200 when the user-agent doesn't match
        # This just validates that the test case works
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/user_agent_check"), user_agent="USER_AGENT_CHECK_DOESNT_MATCH", timeout=3)
        self.assertEqual(result.response_code, 200)

        # Make sure that the server is validating the user-agent which returns 201 when the user-agent matches "USER_AGENT_CHECK"
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/user_agent_check"), user_agent="USER_AGENT_CHECK", timeout=3)
        self.assertEqual(result.response_code, 201)

    @skipIfNoServer
    def test_should_contain_string(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, should_contain_string="<h1>My First Heading</h1>")

        self.assertEqual(result.response_code, 200)

        self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
        self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')

        self.assertEqual(result.has_expected_string, True)

    @skipIfNoServer
    def test_should_contain_string_no_match(self):
        url_field = URLField("test_ping", "title", "this is a test")
        result = WebPing.ping(url_field.to_python("http://127.0.0.1:" + str(self.web_server_port) + "/test_page"), timeout=3, should_contain_string="<h1>Should not Match!</h1>")

        self.assertEqual(result.response_code, 200)

        self.assertEqual(result.response_md5, '1f6c14189070f50c4c06ada640c14850') # This is 1f6c14189070f50c4c06ada640c14850 on disk
        self.assertEqual(result.response_sha224, 'deaf4c0062539c98b4e957712efcee6d42832fed2d803c2bbf984b23')

        self.assertEqual(result.has_expected_string, False)


class TestOnCloud(unittest.TestCase):
    """Tests for WebPing behavior when running on Splunk Cloud."""

    def setUp(self):
        super(TestOnCloud, self).setUp()

        # Configure an instance of the class to test
        self.web_ping = WebPing()

        # Force the class to act like it is on cloud
        self.web_ping.is_on_cloud = self.fake_is_on_cloud

    def fake_is_on_cloud(self, session_key):
        # Stand-in for is_on_cloud() that always reports cloud.
        return True

    def test_get_proxy_config(self):
        # See https://lukemurphey.net/issues/2445
        self.web_ping.is_on_cloud = self.fake_is_on_cloud
        self.web_ping.get_proxy_config('a session key')

        self.assertEqual(self.web_ping.get_proxy_config('a session key'), ("http", None, None, None, None, None))


if __name__ == '__main__':
    try:
        unittest.main()
    finally:
        if WebsiteMonitoringAppTest.proxyd is not None:
            WebsiteMonitoringAppTest.proxyd.shutdown()
multithreads4.py
"""Thread-safety demo: list operations are not atomic.

The original version had both threads read ``m[-1]`` (each saw 5), sleep,
and then call ``m.remove(5)`` -- the second ``remove()`` raised an
unhandled ValueError because the element was already gone, and the
threads were never joined.  A lock around the read-sleep-remove sequence
makes the compound operation atomic, so each thread removes a distinct
tail element.
"""
import threading
import time

# Shared list mutated concurrently by the worker threads.
m = [1, 2, 3, 4, 5]

# Guards the non-atomic "read last element, then remove it" sequence.
_m_lock = threading.Lock()


def remove_last(delay=1):
    """Atomically remove the current last element of the shared list ``m``.

    delay: seconds to sleep between reading and removing.  Kept as a
        parameter (default 1, matching the original demo) so the former
        race window can be exercised quickly in tests.
    """
    with _m_lock:
        a = m[-1]
        time.sleep(delay)
        m.remove(a)


if __name__ == '__main__':
    print(m[-1])
    t1 = threading.Thread(target=remove_last)
    t1.start()
    t2 = threading.Thread(target=remove_last)
    t2.start()
    # Wait for both workers so the interpreter does not exit mid-demo.
    t1.join()
    t2.join()
    print(m)
__init__.py
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import threading import time from collections import deque from multiprocessing import Lock from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.compat.six.moves import queue as Queue from ansible.compat.six import iteritems, string_types from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor import action_write_locks from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.playbook.task_include import TaskInclude from ansible.playbook.role_include import IncludeRole from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar from ansible.vars import combine_vars, strip_internal_keys from ansible.module_utils._text import to_text try: from __main__ import display except ImportError: from ansible.utils.display 
# NOTE(review): "import Display" below completes the
# "from ansible.utils.display import ..." fallback that starts on the
# previous (collapsed) line; code tokens are reproduced unchanged.
import Display
    display = Display()

__all__ = ['StrategyBase']


# TODO: this should probably be in the plugins/__init__.py, with
#       a smarter mechanism to set all of the attributes based on
#       the loaders created there
class SharedPluginLoaderObj:
    '''
    A simple object to make pass the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # Snapshot the module-level plugin loaders so a single object can
        # carry all of them to the worker processes.
        self.action_loader = action_loader
        self.connection_loader = connection_loader
        self.filter_loader = filter_loader
        self.test_loader = test_loader
        self.lookup_loader = lookup_loader
        self.module_loader = module_loader


# Shutdown marker put on the final queue by StrategyBase.cleanup().
_sentinel = object()


def results_thread_main(strategy):
    # Background reader: drain the strategy's final results queue into
    # strategy._results (under the results lock) until shutdown.
    while True:
        try:
            result = strategy._final_q.get()
            # NOTE(review): the sentinel is detected by *type*, not by
            # identity (`result is _sentinel`).  This looks deliberate:
            # if the queue pickles its items, the received object would be
            # a different instance and an identity check would fail --
            # confirm before "fixing" this to an `is` comparison.
            if type(result) == object:
                break
            else:
                strategy._results_lock.acquire()
                strategy._results.append(result)
                strategy._results_lock.release()
        except (IOError, EOFError):
            # Queue torn down underneath us; exit the thread.
            break
        except Queue.Empty:
            # Defensive: blocking get() should not raise Empty; retry.
            pass


class StrategyBase:

    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''

    def __init__(self, tqm):
        # Cache the task queue manager and its collaborators locally so
        # strategies do not reach back into tqm on every access.
        self._tqm = tqm
        self._inventory = tqm.get_inventory()
        self._workers = tqm.get_workers()
        self._notified_handlers = tqm._notified_handlers
        self._listening_handlers = tqm._listening_handlers
        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q
        # CLI flags; default False when the options object lacks them.
        self._step = getattr(tqm._options, 'step', False)
        self._diff = getattr(tqm._options, 'diff', False)

        # Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display # internal counters self._pending_results = 0 self._cur_worker = 0 # this dictionary is used to keep track of hosts that have # outstanding tasks still in queue self._blocked_hosts = dict() self._results = deque() self._results_lock = threading.Condition(threading.Lock()) # create the result processing thread for reading results in the background self._results_thread = threading.Thread(target=results_thread_main, args=(self,)) self._results_thread.daemon = True self._results_thread.start() def cleanup(self): self._final_q.put(_sentinel) self._results_thread.join() def run(self, iterator, play_context, result=0): # execute one more pass through the iterator without peeking, to # make sure that all of the hosts are advanced to their final task. # This should be safe, as everything should be ITERATING_COMPLETE by # this point, though the strategy may not advance the hosts itself. [iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] # save the failed/unreachable hosts, as the run_handlers() # method will clear that information during its execution failed_hosts = iterator.get_failed_hosts() unreachable_hosts = self._tqm._unreachable_hosts.keys() display.debug("running handlers") handler_result = self.run_handlers(iterator, play_context) if isinstance(handler_result, bool) and not handler_result: result |= self._tqm.RUN_ERROR elif not handler_result: result |= handler_result # now update with the hosts (if any) that failed or were # unreachable during the handler execution phase failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts()) unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys()) # return the appropriate code, depending on the status hosts after the run if not isinstance(result, bool) and result != self._tqm.RUN_OK: return result elif len(unreachable_hosts) > 0: return self._tqm.RUN_UNREACHABLE_HOSTS 
# NOTE(review): this collapsed line begins inside StrategyBase.run() (the
# tail of its return-code selection) and ends inside the header comment of
# _queue_task(); code tokens are reproduced unchanged.
elif len(failed_hosts) > 0:
    return self._tqm.RUN_FAILED_HOSTS
else:
    return self._tqm.RUN_OK

def get_hosts_remaining(self, play):
    # Hosts of the play that have neither failed nor become unreachable.
    return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]

def get_failed_hosts(self, play):
    # Hosts of the play recorded as failed by the task queue manager.
    return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]

def add_tqm_variables(self, vars, play):
    '''
    Base class method to add extra variables/information to the list of task vars
    sent through the executor engine regarding the task queue manager state.
    '''
    vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
    vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]

def _queue_task(self, host, task, task_vars, play_context):
    ''' handles queueing the task up to be sent to a worker '''

    display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))

    # Add a write lock for tasks.
    # Maybe this should be added somewhere further up the call stack but
    # this is the earliest in the code where we have task (1) extracted
    # into its own variable and (2) there's only a single code path
    # leading to the module being run. This is called by three
    # functions: __init__.py::_do_handler_run(), linear.py::run(), and
    # free.py::run() so we'd have to add to all three to do it there.
    # The next common higher level is __init__.py::run() and that has
    # tasks inside of play_iterator so we'd have to extract them to do it
    # there.
if task.action not in action_write_locks.action_write_locks: display.debug('Creating lock for %s' % task.action) action_write_locks.action_write_locks[task.action] = Lock() # and then queue the new task try: # create a dummy object with plugin loaders set as an easier # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() queued = False starting_worker = self._cur_worker while True: (worker_prc, rslt_q) = self._workers[self._cur_worker] if worker_prc is None or not worker_prc.is_alive(): worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) self._workers[self._cur_worker][0] = worker_prc worker_prc.start() display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers))) queued = True self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 if queued: break elif self._cur_worker == starting_worker: time.sleep(0.0001) self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: # most likely an abort display.debug("got an error while queuing: %s" % e) return display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action)) def _process_pending_results(self, iterator, one_pass=False, max_passes=None): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). 
''' ret_results = [] def get_original_host(host_name): host_name = to_text(host_name) if host_name in self._inventory._hosts_cache: return self._inventory._hosts_cache[host_name] else: return self._inventory.get_host(host_name) def search_handler_blocks_by_name(handler_name, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_task.name: handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task) templar = Templar(loader=self._loader, variables=handler_vars) try: # first we check with the full result of get_name(), which may # include the role name (if the handler is from a role). If that # is not found, we resort to the simple name field, which doesn't # have anything extra added to it. target_handler_name = templar.template(handler_task.name) if target_handler_name == handler_name: return handler_task else: target_handler_name = templar.template(handler_task.get_name()) if target_handler_name == handler_name: return handler_task except (UndefinedError, AnsibleUndefinedVariable): # We skip this handler due to the fact that it may be using # a variable in the name that was conditionally included via # set_fact or some other method, and we don't want to error # out unnecessarily continue return None def search_handler_blocks_by_uuid(handler_uuid, handler_blocks): for handler_block in handler_blocks: for handler_task in handler_block.block: if handler_uuid == handler_task._uuid: return handler_task return None def parent_handler_match(target_handler, handler_name): if target_handler: if isinstance(target_handler, (TaskInclude, IncludeRole)): try: handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler) templar = Templar(loader=self._loader, variables=handler_vars) target_handler_name = templar.template(target_handler.name) if target_handler_name == handler_name: return True else: target_handler_name = 
templar.template(target_handler.get_name()) if target_handler_name == handler_name: return True except (UndefinedError, AnsibleUndefinedVariable): pass return parent_handler_match(target_handler._parent, handler_name) else: return False # a Templar class to use for templating things later, as we're using # original/non-validated objects here on the manager side. We set the # variables in use later inside the loop below templar = Templar(loader=self._loader) cur_pass = 0 while True: try: self._results_lock.acquire() task_result = self._results.pop() except IndexError: break finally: self._results_lock.release() # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc. original_host = get_original_host(task_result._host) original_task = iterator.get_original_task(original_host, task_result._task) task_result._host = original_host task_result._task = original_task # get the correct loop var for use later if original_task.loop_control: loop_var = original_task.loop_control.loop_var or 'item' else: loop_var = 'item' # get the vars for this task/host pair, make them the active set of vars for our templar above task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task) self.add_tqm_variables(task_vars, play=iterator._play) templar.set_available_variables(task_vars) # send callbacks for 'non final' results if '_ansible_retry' in task_result._result: self._tqm.send_callback('v2_runner_retry', task_result) continue elif '_ansible_item_result' in task_result._result: if task_result.is_failed() or task_result.is_unreachable(): self._tqm.send_callback('v2_runner_item_on_failed', task_result) elif task_result.is_skipped(): self._tqm.send_callback('v2_runner_item_on_skipped', task_result) else: if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._tqm.send_callback('v2_runner_item_on_ok', task_result) continue run_once 
= templar.template(original_task.run_once) if original_task.register: if run_once: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] else: host_list = [original_host] clean_copy = strip_internal_keys(task_result._result) if 'invocation' in clean_copy: del clean_copy['invocation'] for target_host in host_list: self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy}) # all host status messages contain 2 entries: (msg, task_result) role_ran = False if task_result.is_failed(): role_ran = True ignore_errors = templar.template(original_task.ignore_errors) if not ignore_errors: display.debug("marking %s as failed" % original_host.name) if run_once: # if we're using run_once, we have to fail every host here for h in self._inventory.get_hosts(iterator._play.hosts): if h.name not in self._tqm._unreachable_hosts: state, _ = iterator.get_next_task_for_host(h, peek=True) iterator.mark_host_failed(h) state, new_task = iterator.get_next_task_for_host(h, peek=True) else: iterator.mark_host_failed(original_host) # increment the failed count for this host self._tqm._stats.increment('failures', original_host.name) # grab the current state and if we're iterating on the rescue portion # of a block then we save the failed task in a special var for use # within the rescue/always state, _ = iterator.get_next_task_for_host(original_host, peek=True) if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE: self._tqm._failed_hosts[original_host.name] = True if state and state.run_state == iterator.ITERATING_RESCUE: self._variable_manager.set_nonpersistent_facts( original_host, dict( ansible_failed_task=original_task.serialize(), ansible_failed_result=task_result._result, ), ) else: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result['changed']: 
self._tqm._stats.increment('changed', original_host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors) elif task_result.is_unreachable(): self._tqm._unreachable_hosts[original_host.name] = True iterator._play._removed_hosts.append(original_host.name) self._tqm._stats.increment('dark', original_host.name) self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif task_result.is_skipped(): self._tqm._stats.increment('skipped', original_host.name) self._tqm.send_callback('v2_runner_on_skipped', task_result) else: role_ran = True if original_task.loop: # this task had a loop, and has more than one result, so # loop over all of them instead of a single result result_items = task_result._result.get('results', []) else: result_items = [ task_result._result ] for result_item in result_items: if '_ansible_notify' in result_item: if task_result.is_changed(): # The shared dictionary for notified handlers is a proxy, which # does not detect when sub-objects within the proxy are modified. # So, per the docs, we reassign the list so the proxy picks up and # notifies all other threads for handler_name in result_item['_ansible_notify']: found = False # Find the handler using the above helper. First we look up the # dependency chain of the current task (if it's from a role), otherwise # we just look through the list of handlers in the current play/all # roles and use the first one that matches the notify name target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers) if target_handler is not None: found = True if original_host not in self._notified_handlers[target_handler._uuid]: self._notified_handlers[target_handler._uuid].append(original_host) # FIXME: should this be a callback? 
display.vv("NOTIFIED HANDLER %s" % (handler_name,)) else: # As there may be more than one handler with the notified name as the # parent, so we just keep track of whether or not we found one at all for target_handler_uuid in self._notified_handlers: target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers) if target_handler and parent_handler_match(target_handler, handler_name): self._notified_handlers[target_handler._uuid].append(original_host) display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),)) found = True if handler_name in self._listening_handlers: for listening_handler_uuid in self._listening_handlers[handler_name]: listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers) if listening_handler is not None: found = True else: continue if original_host not in self._notified_handlers[listening_handler._uuid]: self._notified_handlers[listening_handler._uuid].append(original_host) display.vv("NOTIFIED HANDLER %s" % (listening_handler.get_name(),)) # and if none were found, then we raise an error if not found: msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name if C.ERROR_ON_MISSING_HANDLER: raise AnsibleError(msg) else: display.warning(msg) if 'add_host' in result_item: # this task added a new host (add_host module) new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) elif 'add_group' in result_item: # this task added a new group (group_by module) self._add_group(original_host, result_item) elif 'ansible_facts' in result_item: # if delegated fact and we are delegating facts, we need to change target host for them if original_task.delegate_to is not None and original_task.delegate_facts: item = result_item.get(loop_var, None) if item is not None: task_vars[loop_var] = item host_name = templar.template(original_task.delegate_to) actual_host = 
self._inventory.get_host(host_name) if actual_host is None: actual_host = Host(name=host_name) else: actual_host = original_host if original_task.action == 'include_vars': for (var_name, var_value) in iteritems(result_item['ansible_facts']): # find the host we're actually refering too here, which may # be a host that is not really in inventory at all if run_once: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] else: host_list = [actual_host] for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) else: if run_once: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] else: host_list = [actual_host] for target_host in host_list: if original_task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy()) else: self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy()) if 'diff' in task_result._result: if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) if original_task.action not in ['include', 'include_role']: self._tqm._stats.increment('ok', original_host.name) if 'changed' in task_result._result and task_result._result['changed']: self._tqm._stats.increment('changed', original_host.name) # finally, send the ok for this task self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if original_host.name in self._blocked_hosts: del self._blocked_hosts[original_host.name] # If this is a role task, mark the parent role as being run (if # the task was ok or failed, but not skipped or unreachable) if original_task._role is not None and role_ran: #TODO: and original_task.action != 'include_role':? 
# lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]): if role_obj._uuid == original_task._role._uuid: role_obj._had_task_run[original_host.name] = True ret_results.append(task_result) if one_pass or max_passes is not None and (cur_pass+1) >= max_passes: break cur_pass += 1 return ret_results def _wait_on_pending_results(self, iterator): ''' Wait for the shared counter to drop to zero, using a short sleep between checks to ensure we don't spin lock ''' ret_results = [] display.debug("waiting for pending results...") while self._pending_results > 0 and not self._tqm._terminated: if self._tqm.has_dead_workers(): raise AnsibleError("A worker was found in a dead state") results = self._process_pending_results(iterator) ret_results.extend(results) if self._pending_results > 0: time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL) display.debug("no more pending results, returning what we have") return ret_results def _add_host(self, host_info, iterator): ''' Helper function to add a new host to inventory based on a task result. 
''' host_name = host_info.get('host_name') # Check if host in inventory, add if not new_host = self._inventory.get_host(host_name) if not new_host: new_host = Host(name=host_name) self._inventory._hosts_cache[host_name] = new_host self._inventory.get_host_vars(new_host) allgroup = self._inventory.get_group('all') allgroup.add_host(new_host) # Set/update the vars for this host new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host)) new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict())) new_groups = host_info.get('groups', []) for group_name in new_groups: if not self._inventory.get_group(group_name): new_group = Group(group_name) self._inventory.add_group(new_group) self._inventory.get_group_vars(new_group) new_group.vars = self._inventory.get_group_variables(group_name) else: new_group = self._inventory.get_group(group_name) new_group.add_host(new_host) # add this host to the group cache if self._inventory.groups is not None: if group_name in self._inventory.groups: if new_host not in self._inventory.get_group(group_name).hosts: self._inventory.get_group(group_name).hosts.append(new_host.name) # clear pattern caching completely since it's unpredictable what # patterns may have referenced the group self._inventory.clear_pattern_cache() # clear cache of group dict, which is used in magic host variables self._inventory.clear_group_dict_cache() # also clear the hostvar cache entry for the given play, so that # the new hosts are available if hostvars are referenced self._variable_manager.invalidate_hostvars_cache(play=iterator._play) def _add_group(self, host, result_item): ''' Helper function to add a group (if it does not exist), and to assign the specified host to that group. 
''' changed = False # the host here is from the executor side, which means it was a # serialized/cloned copy and we'll need to look up the proper # host object from the master inventory real_host = self._inventory.get_host(host.name) group_name = result_item.get('add_group') new_group = self._inventory.get_group(group_name) if not new_group: # create the new group and add it to inventory new_group = Group(name=group_name) self._inventory.add_group(new_group) new_group.vars = self._inventory.get_group_vars(new_group) # and add the group to the proper hierarchy allgroup = self._inventory.get_group('all') allgroup.add_child_group(new_group) changed = True if group_name not in host.get_groups(): new_group.add_host(real_host) changed = True if changed: # clear cache of group dict, which is used in magic host variables self._inventory.clear_group_dict_cache() return changed def _load_included_file(self, included_file, iterator, is_handler=False): ''' Loads an included YAML file of tasks, applying the optional set of variables. ''' display.debug("loading included file: %s" % included_file._filename) try: data = self._loader.load_from_file(included_file._filename) if data is None: return [] elif not isinstance(data, list): raise AnsibleError("included task files must contain a list of tasks") ti_copy = included_file._task.copy() temp_vars = ti_copy.vars.copy() temp_vars.update(included_file._args) # pop tags out of the include args, if they were specified there, and assign # them to the include. If the include already had tags specified, we raise an # error so that users know not to specify them both ways tags = included_file._task.vars.pop('tags', []) if isinstance(tags, string_types): tags = tags.split(',') if len(tags) > 0: if len(included_file._task.tags) > 0: raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). 
Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement", obj=included_file._task._ds) display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option") included_file._task.tags = tags ti_copy.vars = temp_vars block_list = load_list_of_blocks( data, play=iterator._play, parent_block=None, task_include=ti_copy, role=included_file._task._role, use_handlers=is_handler, loader=self._loader, variable_manager=self._variable_manager, ) # since we skip incrementing the stats when the task result is # first processed, we do so now for each host in the list for host in included_file._hosts: self._tqm._stats.increment('ok', host.name) except AnsibleError as e: # mark all of the hosts including this file as failed, send callbacks, # and increment the stats for this host for host in included_file._hosts: tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e))) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) self._tqm.send_callback('v2_runner_on_failed', tr) return [] # finally, send the callback and return the list of blocks loaded self._tqm.send_callback('v2_playbook_on_include', included_file) display.debug("done processing included file") return block_list def run_handlers(self, iterator, play_context): ''' Runs handlers on those hosts which have been notified. 
''' result = self._tqm.RUN_OK for handler_block in iterator._play.handlers: # FIXME: handlers need to support the rescue/always portions of blocks too, # but this may take some work in the iterator and gets tricky when # we consider the ability of meta tasks to flush handlers for handler in handler_block.block: if handler._uuid in self._notified_handlers and len(self._notified_handlers[handler._uuid]): result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context) if not result: break return result def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None): # FIXME: need to use iterator.get_failed_hosts() instead? #if not len(self.get_hosts_remaining(iterator._play)): # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') # result = False # break saved_name = handler.name handler.name = handler_name self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) handler.name = saved_name if notified_hosts is None: notified_hosts = self._notified_handlers[handler._uuid] run_once = False try: action = action_loader.get(handler.action, class_only=True) if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False): run_once = True except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin pass host_results = [] for host in notified_hosts: if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers): task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) self.add_tqm_variables(task_vars, play=iterator._play) self._queue_task(host, handler, task_vars, play_context) if run_once: break # collect the results from the handler run host_results = self._wait_on_pending_results(iterator) try: included_files = IncludedFile.process_include_results( host_results, self._tqm, iterator=iterator, inventory=self._inventory, loader=self._loader, 
variable_manager=self._variable_manager ) except AnsibleError as e: return False result = True if len(included_files) > 0: for included_file in included_files: try: new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True) # for every task in each block brought in by the include, add the list # of hosts which included the file to the notified_handlers dict for block in new_blocks: iterator._play.handlers.append(block) iterator.cache_block_tasks(block) for task in block.block: result = self._do_handler_run( handler=task, handler_name=None, iterator=iterator, play_context=play_context, notified_hosts=included_file._hosts[:], ) if not result: break except AnsibleError as e: for host in included_file._hosts: iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True display.warning(str(e)) continue # wipe the notification list self._notified_handlers[handler._uuid] = [] display.debug("done running handlers, result is: %s" % result) return result def _take_step(self, task, host=None): ret=False msg=u'Perform task: %s ' % task if host: msg += u'on %s ' % host msg += u'(N)o/(y)es/(c)ontinue: ' resp = display.prompt(msg) if resp.lower() in ['y','yes']: display.debug("User ran task") ret = True elif resp.lower() in ['c', 'continue']: display.debug("User ran task and cancled step mode") self._step = False ret = True else: display.debug("User skipped task") display.banner(msg) return ret def _execute_meta(self, task, play_context, iterator, target_host=None): # meta tasks store their args in the _raw_params field of args, # since they do not use k=v pairs, so get that meta_action = task.args.get('_raw_params') # FIXME(s): # * raise an error or show a warning when a conditional is used # on a meta task that doesn't support them def _evaluate_conditional(h): all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) templar = Templar(loader=self._loader, variables=all_vars) return 
task.evaluate_conditional(templar, all_vars) if target_host: host_list = [target_host] else: host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] results = [] for host in host_list: result = None if meta_action == 'noop': # FIXME: issue a callback for the noop here? result = TaskResult(host, task, dict(changed=False, msg="noop")) elif meta_action == 'flush_handlers': self.run_handlers(iterator, play_context) elif meta_action == 'refresh_inventory': self._inventory.refresh_inventory() result = TaskResult(host, task, dict(changed=False, msg="inventory successfully refreshed")) elif meta_action == 'clear_facts': if _evaluate_conditional(host): self._variable_manager.clear_facts(target_host) result = TaskResult(host, task, dict(changed=True, msg="inventory successfully refreshed")) else: result = TaskResult(host, task, dict(changed=False, skipped=True)) elif meta_action == 'clear_host_errors': if _evaluate_conditional(host): self._tqm._failed_hosts.pop(host.name, False) self._tqm._unreachable_hosts.pop(host.name, False) iterator._host_states[host.name].fail_state = iterator.FAILED_NONE result = TaskResult(host, task, dict(changed=True, msg="successfully cleared host errors")) else: result = TaskResult(host, task, dict(changed=False, skipped=True)) elif meta_action == 'end_play': if _evaluate_conditional(host): iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE result = TaskResult(host, task, dict(changed=True, msg="ending play")) else: result = TaskResult(host, task, dict(changed=False, skipped=True)) #elif meta_action == 'reset_connection': # connection_info.connection.close() else: raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) if result is not None: results.append(result) return results
controller.py
from constants import mode_map
from osc4py3.as_eventloop import *
from osc4py3 import oscmethod as osm
from osc4py3 import oscbuildparse
import curses
import os
import threading
import time


class Controller():
    """Small OSC bridge: sends mode changes and logs every incoming message."""

    def __init__(self, snd_port=31636, rcv_port=31637):
        self.log = []
        osc_startup()
        osc_udp_client("127.0.0.1", snd_port, "sender")
        osc_udp_server("127.0.0.1", rcv_port, "receiver")
        # Subscribe to every address; debug_log receives (address, payload).
        osc_method("/*", self.debug_log,
                   argscheme=osm.OSCARG_ADDRESS + osm.OSCARG_DATA)

    def process(self):
        """Pump the osc4py3 event loop once (dispatches pending messages)."""
        osc_process()

    def set_mode(self, mode):
        """Send the selected mode string to the /mode OSC address."""
        addr = f"/mode"
        msg = oscbuildparse.OSCMessage(addr, None, mode)
        osc_send(msg, "sender")

    def debug_log(self, addr, data):
        """Handler for all incoming messages; append joined payload to the log."""
        self.log.append(''.join(data))


def draw(win, current_mode, log):
    """Render the mode menu, the current mode, and the message log."""
    win.clear()
    win.addstr("Menu:\n")
    for i, (mode, (pdac_mode, ml_mode)) in enumerate(mode_map.items()):
        win.addstr(f"\t{i + 1} : {mode}\t ({pdac_mode}, {ml_mode})\n")
    win.addstr("\n")
    win.addstr(f"Current: {current_mode}")
    win.addstr("\n")
    win.addstr("------------------------------------\n")
    for line in log:
        try:
            # BUG FIX: the original wrote f"{line.encode('utf-8')}" which
            # renders the bytes repr (b'...') on screen; addstr takes str.
            win.addstr(line + "\n")
        except curses.error:
            # Window too small for the remaining log lines; drop the overflow.
            pass


def main(win):
    controller = Controller()
    win.nodelay(False)  # getkey() blocks until a key is pressed
    mode = ""
    draw(win, mode, controller.log)

    running = True

    def loop():
        # Redraw and pump OSC until the process ends. `mode` is read through
        # the closure, so reassignments in main() are picked up here.
        while running:
            draw(win, mode, controller.log)
            controller.process()
            time.sleep(0.01)

    # BUG FIX: made the thread a daemon — `running` is never cleared and the
    # main loop below never returns, so a non-daemon thread kept the process
    # alive forever after the UI was torn down.
    thread = threading.Thread(target=loop, daemon=True)
    thread.start()

    while True:
        time.sleep(0.01)
        try:
            selected = win.getkey()
            try:
                selected = int(selected)
            except ValueError:
                continue
            if 0 < selected <= len(mode_map):
                mode = list(mode_map.items())[selected - 1]
                mode_str = str(mode[0])
                controller.set_mode(mode_str)
        except curses.error:
            pass


curses.wrapper(main)
mod_print.py
#!/usr/bin/env python # # mod_print.py # send a file to a printer device # # Neil Gershenfeld # (c) Massachusetts Institute of Technology 2015 # # This work may be reproduced, modified, distributed, # performed, and displayed for any purpose, but must # acknowledge the fab modules project. Copyright is # retained and must be preserved. The work is provided # as is; no warranty is provided, and users accept all # liability. # # imports # import sys,string,threading,time,os from Tkinter import * # # globals # WINDOW = 400 # window size RUN = 0 PAUSE = 1 CANCEL = 2 state = RUN # # send routine # def send(canvas,data,device,separator): global state,tstart n = 0 N = string.count(data,separator) # # loop over commands # pointer = 0 while 1: # # check for pause # if (state == PAUSE): while (state == PAUSE): time.sleep(0.001) # # check for cancel # if (state == CANCEL): break # # find next command # position = string.find(data,separator,pointer) if (position == -1): # # break if not found # break command = data[pointer:(position+1)] pointer = position+1 # # send the command # device.write(command) # # update the GUI # n += 1 percent = (100.0*n)/N dt = (time.time()-tstart)/60.0 totalt = (dt/n)*N canvas.itemconfigure("text",text="sending %.1f%% (%.0f/%.0f min)"%(percent,dt,totalt)) canvas.update() device.close() os._exit(0) # # pause routine # def pause(): global state if (state == RUN): state = PAUSE pause_button.config(text="continue") elif (state == PAUSE): state = RUN pause_button.config(text="pause") # # cancel routine # def cancel(): global state state = CANCEL cancel_button.config(text="canceling ...") # # quit routine # def quit(): s.close() sys.exit() # # command line # if (len(sys.argv) != 4): print "command line: mod_print.py device separator file" print " device = printer device" print " separator = command separator character ('' to ignore)" print " file = file to send" sys.exit(1) device_name = sys.argv[1] separator = sys.argv[2] file_name = sys.argv[3] # # 
open file # try: file_handle = open(file_name) data = file_handle.read() file_handle.close() except: print 'error: can not open file '+file_name sys.exit(1) # # open device # try: device = open(device_name,'w',0) except: print 'error: can not open device '+device_name sys.exit(1) # # send file and exit if no command separator # if (separator == ''): device.write(data) device.close() sys.exit(0) # # if command separator, set up GUI # # # set up GUI # root = Tk() root.title('mod_print.py') canvas = Canvas(root, width=WINDOW, height=.25*WINDOW, background='white') canvas.create_text(.5*WINDOW,.1*WINDOW,text="",font=("Helvetica",24),tags="text",fill="#0000b0") canvas.pack() pause_button = Button(root,text="pause",command=pause) pause_button.pack() cancel_button = Button(root,text="cancel",command=cancel) cancel_button.pack() # # start sending thread # tstart = time.time() t = threading.Thread(target=send,args=(canvas,data,device,separator)) t.start() # # start UI loop # root.mainloop()
power_monitoring.py
import random
import threading
import time
from statistics import mean

from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog

PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091  # LPF gain for 5s tau (dt/tau / (dt/tau + 1))

# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45

VBATT_PAUSE_CHARGING = 11.0           # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0    # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600


# Parameters
def get_battery_capacity():
  """Read the device battery capacity percentage from sysfs (0 on failure)."""
  return _read_param("/sys/class/power_supply/battery/capacity", int)


# Helpers
def _read_param(path, parser, default=0):
  """Read a sysfs file and parse its contents; return `default` on any error."""
  try:
    with open(path) as f:
      return parser(f.read())
  except Exception:
    return default


def panda_current_to_actual_current(panda_current):
  """Convert a raw panda ADC current reading to amps.

  From white/grey panda schematic.
  """
  return (3.3 - (panda_current * 3.3 / 4096)) / 8.25


class PowerMonitoring:
  """Tracks device power usage offroad and estimates car battery charge."""

  def __init__(self):
    self.params = Params()
    self.last_measurement_time = None           # Used for integration delta
    self.last_save_time = 0                     # Used for saving current value in a param
    self.power_used_uWh = 0                     # Integrated power usage in uWh since going into offroad
    self.next_pulsed_measurement_time = None
    self.car_voltage_mV = 12e3                  # Low-passed version of pandaState voltage
    self.car_voltage_instant_mV = 12e3          # Last value of pandaState voltage
    self.integration_lock = threading.Lock()

    car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
    if car_battery_capacity_uWh is None:
      car_battery_capacity_uWh = 0

    # Reset capacity if it's low
    self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

  # Calculation tick
  def calculate(self, pandaState):
    """Update voltage filters and integrate power usage for one tick.

    `pandaState` may be None (not in a car), in which case state is reset.
    All exceptions are swallowed and logged so the caller's loop never dies.
    """
    try:
      now = sec_since_boot()

      # If pandaState is None, we're probably not in a car, so we don't care
      if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
        with self.integration_lock:
          self.last_measurement_time = None
          self.next_pulsed_measurement_time = None
          self.power_used_uWh = 0
        return

      # Low-pass battery voltage
      self.car_voltage_instant_mV = pandaState.pandaState.voltage
      self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))

      # Cap the car battery power and save it in a param every 10-ish seconds
      self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
      self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
      if now - self.last_save_time >= 10:
        put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
        self.last_save_time = now

      # First measurement, set integration time
      with self.integration_lock:
        if self.last_measurement_time is None:
          self.last_measurement_time = now
          return

      if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
        # If there is ignition, we integrate the charging rate of the car
        with self.integration_lock:
          self.power_used_uWh = 0
          integration_time_h = (now - self.last_measurement_time) / 3600
          if integration_time_h < 0:
            raise ValueError(f"Negative integration time: {integration_time_h}h")
          self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
          self.last_measurement_time = now
      else:
        # No ignition, we integrate the offroad power used by the device
        is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
        # Get current power draw somehow
        current_power = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
        if current_power is not None:
          pass
        elif HARDWARE.get_battery_status() == 'Discharging':
          # If the battery is discharging, we can use this measurement
          # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
          current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
        elif (pandaState.pandaState.pandaType in [log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda]) and (pandaState.pandaState.current > 1):
          # If white/grey panda, use the integrated current measurements if the measurement is not 0
          # If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
          # This seems to be accurate to about 5%
          current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
        elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
          # TODO: Figure out why this is off by a factor of 3/4???
          FUDGE_FACTOR = 1.33

          # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
          def perform_pulse_measurement(now):
            try:
              HARDWARE.set_battery_charging(False)
              time.sleep(5)

              # Measure for a few sec to get a good average
              voltages = []
              currents = []
              for _ in range(6):
                voltages.append(HARDWARE.get_battery_voltage())
                currents.append(HARDWARE.get_battery_current())
                time.sleep(1)
              current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))

              self._perform_integration(now, current_power * FUDGE_FACTOR)

              # Enable charging again
              HARDWARE.set_battery_charging(True)
            except Exception:
              cloudlog.exception("Pulsed power measurement failed")

          # Start pulsed measurement and return
          threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
          self.next_pulsed_measurement_time = None
          return
        elif self.next_pulsed_measurement_time is None and not is_uno:
          # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
          # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
          # We shouldn't do this very often, so make sure it has been some long-ish random time interval
          self.next_pulsed_measurement_time = now + random.randint(120, 180)
          return
        else:
          # Do nothing
          return

        # Do the integration
        self._perform_integration(now, current_power)
    except Exception:
      cloudlog.exception("Power monitoring calculation failed")

  def _perform_integration(self, t, current_power):
    """Integrate `current_power` (W) over the time since the last measurement,
    accumulating device usage and draining the estimated car battery."""
    with self.integration_lock:
      try:
        if self.last_measurement_time:
          integration_time_h = (t - self.last_measurement_time) / 3600
          power_used = (current_power * 1000000) * integration_time_h
          if power_used < 0:
            # BUG FIX: the original message labeled the uWh energy value as
            # "Current Power"; report it with the correct label and unit.
            raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Power used: {power_used} uWh")
          self.power_used_uWh += power_used
          self.car_battery_capacity_uWh -= power_used
          self.last_measurement_time = t
      except Exception:
        cloudlog.exception("Integration failed")

  # Get the power usage
  def get_power_used(self):
    """Return integrated offroad device usage in uWh (int)."""
    return int(self.power_used_uWh)

  def get_car_battery_capacity(self):
    """Return the estimated remaining car battery capacity in uWh (int)."""
    return int(self.car_battery_capacity_uWh)

  # See if we need to disable charging
  def should_disable_charging(self, pandaState, offroad_timestamp):
    """Decide whether device charging should be paused to protect the car battery."""
    if pandaState is None or offroad_timestamp is None:
      return False

    now = sec_since_boot()
    disable_charging = False
    disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
    # Pause on low filtered voltage, but only if the instant reading is above
    # the power-loss floor (avoids reacting to a momentary disconnect).
    disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
    disable_charging |= (self.car_battery_capacity_uWh <= 0)
    disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
    disable_charging &= (not self.params.get_bool("DisablePowerDown"))
    disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
    disable_charging |= self.params.get_bool("ForcePowerDown")
    return disable_charging

  # See if we need to shutdown
  def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
    """Decide whether the device should power off entirely."""
    if pandaState is None or offroad_timestamp is None:
      return False

    now = sec_since_boot()
    panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
    BATT_PERC_OFF = 10

    should_shutdown = False
    # Wait until we have shut down charging before powering down
    should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
    should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
    should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
    return should_shutdown
window.py
# _*_coding:utf8_*_ # Project: lin_mass_tools # File: window.py # Author: ClassmateLin # Email: 406728295@qq.com # Time: 2020/2/23 7:53 上午 # DESC: import os import sys import random import time from PyQt5.QtWidgets import QLineEdit, QLabel, QPushButton, QVBoxLayout, QHBoxLayout, QComboBox, QFileDialog from PyQt5.QtWidgets import QWidget, QTableWidget, QTableWidgetItem, QAbstractItemView, QHeaderView, QTextBrowser from PyQt5.QtGui import QIntValidator, QFont from PyQt5 import QtCore, QtGui from PyQt5.QtWidgets import QMessageBox, QDesktopWidget, QApplication from PyQt5.QtCore import pyqtSignal import threading import queue from app.background import get_backgound from app.article import CSDNArticle from app.proxy import QuickProxy, XunProxy from app.visitor import RequestVisitor PROXY_LIST = ['免费代理', '讯代理(付费)'] VISIT_MODEL = ['无界面访问', '浏览器访问(需安装驱动)'] PROXY_TITLE_MAP = { 0: '免费代理', 1: '讯代理(付费)' } PROXY_CLASS_MAP = { 0: QuickProxy, 1: XunProxy } MESSAGE_TITLE = '提示' # 消息框标题 LABEL_STYLE_SHEET = "QLabel{border:2px groove gray;border-radius:10px;padding:2px 4px;color:black;}" START_BUTTON_STYLE_SHEET = "QPushButton{border:2px groove gray;border-radius:10px;padding:2px 4px;color:green;}" PROXY_BUTTON_STYLE_SHEET = "QPushButton{border:2px groove gray;border-radius:10px;padding:2px 4px;color:black;}" STOP_BUTTON_STYLE_SHEET = "QPushButton{border:2px groove gray;border-radius:10px;padding:2px 4px;color:yellow;}" IMPORT_BUTTON_STYLE_SHEET = "QPushButton{border:2px groove gray;border-radius:10px;padding:2px 4px;color:blue;}" DESTROY_BUTTON_STYLE_SHEET = "QPushButton{border:2px groove gray;border-radius:10px;padding:2px 4px;color:red;}" TEXT_BROWSER_STYLE_SHEET = "QTextBrowser{border:2px groove gray;border-radius:10px;padding:2px 4px;color:black;}" TABLE_STYLE_SHEET = "QTableWidget{border:2px groove gray;border-radius:10px;padding:2px 4px;color:black;}" LINE_EDIT_STYLE_SHEET = "QLineEdit{border:2px groove gray;border-radius:10px;padding:2px 4px;}" WIDGET_STYLE_SHEET = 
"QWidget{color:gray;}" TITLE_STYLE_SHEET = "QLabel{padding:2px 4px;color:black;}" CHECK_BOX_SHEET = "QComboBox{border:2px groove gray;border-radius:10px;padding:2px 4px;color:black;}" class Window(QWidget): """ 窗体 """ table_read_num_signal = pyqtSignal(dict) # 阅读数更新信号 log_text_signal = pyqtSignal(str) def __init__(self): """ 初始化窗体 """ super().__init__() self.w_layout = None self._start_btn = None # 开始按钮 self._stop_btn = None # 结束按钮 self._destroy_btn = None # 退出按钮 self._proxy_btn = None # 获取代理 self._blog_name_input = None # 博客名称输入 self._thread_num_input = None # 线程数输入 self._while_num_input = None # 轮数输入 self._visit_space_input = None # 访问间隔输入 self._import_proxy_btn = None self._articles_table = None # 文章表格 self._log_text_browser = None # 文本显示 self._order_no = None self._secret = None self._proxy_input = None self._proxy_check_box = None self._visit_model_check_box = None self.setStyleSheet(WIDGET_STYLE_SHEET) self.resize(800, 600) self.setup_ui() # 设置控件 self._init_log_text() self._start_btn.clicked.connect(self.start) # 开始 self._stop_btn.clicked.connect(self.stop) # 终止 self._destroy_btn.clicked.connect(self.destroy) # 退出 self._proxy_btn.clicked.connect(self._on_get_proxy) # 获取代理 self._import_proxy_btn.clicked.connect(self.import_proxy) # 导入代理 self._is_start = False # 标志是否已经开始 self._running = True # 标识是否运行改为, False子线程会退出 self.table_read_num_signal.connect(self._update_table_read_num) self.log_text_signal.connect(self._show_to_log) self._proxies = queue.Queue() self._lock = threading.Lock() self.setMinimumSize(600, 500) self._articles = [] def _on_get_proxy(self): """ 获取代理按钮曹函数 :return: """ self.log_text_signal.emit('正在获取代理') page = int(self._proxy_input.text()) proxy_obj = QuickProxy(page) proxies = proxy_obj.get_all() random.shuffle(proxies) print(proxies) for pro in proxies: self._proxies.put(pro) self.log_text_signal.emit('代理:{}'.format(pro)) self.log_text_signal.emit('获取免费代理完成...') def center(self): """ 窗口居中 :return: """ # 获得窗口 qr = self.frameGeometry() # 获得屏幕中心点 
cp = QDesktopWidget().availableGeometry().center() # 显示到屏幕中心 qr.moveCenter(cp) self.move(qr.topLeft()) def setup_ui(self): _translate = QtCore.QCoreApplication.translate self.w_layout = QVBoxLayout() # 全局布局采用垂直布局 h_layout1 = QHBoxLayout() blog_name_label = QLabel('博客名称') blog_name_label.setStyleSheet(LABEL_STYLE_SHEET) thread_num_label = QLabel('线程数') thread_num_label.setStyleSheet(LABEL_STYLE_SHEET) while_num_label = QLabel('访问轮数') while_num_label.setStyleSheet(LABEL_STYLE_SHEET) visit_space_label = QLabel('访问间隔') visit_space_label.setStyleSheet(LABEL_STYLE_SHEET) self._blog_name_input = QLineEdit('ClassmateLin', minimumWidth=100) # 博客名称输入框 self._blog_name_input.setStyleSheet(LINE_EDIT_STYLE_SHEET) self._thread_num_input = QLineEdit('5', minimumWidth=2) # 线程数量输入框 self._thread_num_input.setStyleSheet(LINE_EDIT_STYLE_SHEET) self._thread_num_input.setMaxLength(2) thread_num_validator = QIntValidator(self._blog_name_input) thread_num_validator.setRange(1, 99) self._thread_num_input.setValidator(thread_num_validator) self._while_num_input = QLineEdit('5', minimumWidth=5) # 访问轮数输入框 self._while_num_input.setStyleSheet(LINE_EDIT_STYLE_SHEET) self._while_num_input.setMaxLength(6) while_num_validator = QIntValidator(self._while_num_input) while_num_validator.setRange(1, 65535) self._while_num_input.setValidator(while_num_validator) self._visit_space_input = QLineEdit('1', minimumWidth=2) # 访问间隔秒 self._visit_space_input.setStyleSheet(LINE_EDIT_STYLE_SHEET) visit_space_validator = QIntValidator(self._visit_space_input) visit_space_validator.setRange(1, 60) self._visit_space_input.setValidator(visit_space_validator) self._visit_space_input.setMaxLength(2) h_layout2 = QHBoxLayout() proxy_label = QLabel('代理页数') proxy_label.setStyleSheet(LABEL_STYLE_SHEET) self._proxy_input = QLineEdit('5', minimumWidth=2) self._proxy_input.setStyleSheet(LINE_EDIT_STYLE_SHEET) proxy_validator = QIntValidator() proxy_validator.setRange(1, 99) self._proxy_input.setValidator(proxy_validator) 
self._proxy_btn = QPushButton('获取代理') self._proxy_btn.setStyleSheet(PROXY_BUTTON_STYLE_SHEET) self._proxy_check_box = QComboBox() self._proxy_check_box.addItems(PROXY_LIST) self._proxy_check_box.setStyleSheet(CHECK_BOX_SHEET) self._visit_model_check_box = QComboBox() self._visit_model_check_box.addItems(VISIT_MODEL) self._visit_model_check_box.setStyleSheet(CHECK_BOX_SHEET) self._start_btn = QPushButton('开始') # 开始刷访问量按钮 self._start_btn.setStyleSheet(START_BUTTON_STYLE_SHEET) self._stop_btn = QPushButton('停止') # 停止刷访问量按钮 self._stop_btn.setStyleSheet(STOP_BUTTON_STYLE_SHEET) self._destroy_btn = QPushButton('退出') self._destroy_btn.setStyleSheet(DESTROY_BUTTON_STYLE_SHEET) h_layout1.addWidget(blog_name_label) h_layout1.addWidget(self._blog_name_input) h_layout1.addWidget(thread_num_label) h_layout1.addWidget(self._thread_num_input) h_layout1.addWidget(while_num_label) h_layout1.addWidget(self._while_num_input) h_layout1.addWidget(visit_space_label) h_layout1.addWidget(self._visit_space_input) h_layout2.addWidget(proxy_label) h_layout2.addWidget(self._proxy_input) h_layout2.addWidget(self._proxy_check_box) h_layout2.addWidget(self._proxy_btn) h_layout2.addWidget(self._visit_model_check_box) h_layout2.addWidget(self._start_btn) h_layout2.addWidget(self._stop_btn) h_layout2.addWidget(self._destroy_btn) h_widget1 = QWidget() h_widget1.setLayout(h_layout1) h_widget2 = QWidget() h_widget2.setLayout(h_layout2) h_widget3 = QWidget() h_layout3 = QHBoxLayout() h_widget3.setLayout(h_layout3) order_label = QLabel('讯代理订单号') order_label.setStyleSheet(LABEL_STYLE_SHEET) h_layout3.addWidget(order_label) self._order_no = QLineEdit('ZFxxx') self._order_no.setStyleSheet(LINE_EDIT_STYLE_SHEET) h_layout3.addWidget(self._order_no) secret_label = QLabel('讯代理秘钥') secret_label.setStyleSheet(LABEL_STYLE_SHEET) h_layout3.addWidget(secret_label) self._secret = QLineEdit('absfasaxxxx') self._secret.setStyleSheet(LINE_EDIT_STYLE_SHEET) h_layout3.addWidget(self._secret) self._import_proxy_btn = 
QPushButton('文件导入代理') self._import_proxy_btn.setStyleSheet(IMPORT_BUTTON_STYLE_SHEET) h_layout3.addWidget(self._import_proxy_btn) v_layout = QVBoxLayout() v_widget = QWidget() v_widget.setLayout(v_layout) self._articles_table = QTableWidget() self._articles_table.setStyleSheet(TABLE_STYLE_SHEET) self._articles_table.setWindowTitle('文章列表') self._articles_table.setColumnCount(3) self._articles_table.setRowCount(0) self._articles_table.setHorizontalHeaderLabels(['文章标题', '文章链接', '阅读数量']) font = self._articles_table.horizontalHeader().font() font.setBold(True) self._articles_table.horizontalHeader().setFont(font) self._articles_table.setSelectionBehavior(QAbstractItemView.SelectItems) self._articles_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) articles_label = QLabel('文章列表') articles_label.setStyleSheet(TITLE_STYLE_SHEET) articles_label.setAlignment(QtCore.Qt.AlignCenter) log_text_label = QLabel('日志记录') log_text_label.setStyleSheet(TITLE_STYLE_SHEET) log_text_label.setAlignment(QtCore.Qt.AlignCenter) self._log_text_browser = QTextBrowser() font = QFont() font.setFamily("宋体") font.setPointSize(10) self._log_text_browser.setFont(font) self._log_text_browser.setStyleSheet(TEXT_BROWSER_STYLE_SHEET) v_layout.addWidget(articles_label) v_layout.addWidget(self._articles_table) v_layout.addWidget(log_text_label) v_layout.addWidget(self._log_text_browser) self.w_layout.addWidget(h_widget1) self.w_layout.addWidget(h_widget2) self.w_layout.addWidget(h_widget3) self.w_layout.addWidget(v_widget) self.setLayout(self.w_layout) palette = QtGui.QPalette() self.setWindowOpacity(0.99) palette.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap(get_backgound()))) self.setPalette(palette) def _init_log_text(self): text = """ <<使用说明>> /************************************************************************************************** | 博客名称输入你的博客名称, 如你的博客主页地址为https://blog.csdn.net/ClassmateLin/, | 那么ClassmateLin就是需要输入的博客名称。 | 线程数量多有助于提升效率, 过多会导致电脑卡顿。 | 访问轮数表示: 
访问所有文章的次数,有些文章会随机跳过或多访问几次,提高真实性。 | 访问间隔表示: 访问完一篇文章,隔多久访问下一篇文章。 | 需要先获取代理,或者从文件导入代理, 然后点开始运行。详情见https://github.com/ClassmateLin/csdn-increment-visitor。 | 实现原理见博文: https://blog.csdn.net/ClassmateLin/article/details/104423904 ****************************************************************************************************/ """ self._log_text_browser.append(text) def start(self): """ :return: """ if self._is_start: QMessageBox.information(self, MESSAGE_TITLE, '程序已在运行!', QMessageBox.Yes) return self._is_start = True self._running = True self.get_articles_and_show_to_table() articles = self._articles thread_num = int(self._thread_num_input.text()) if self._proxy_check_box.currentIndex() == 1: # 付费代理 order_no = self._order_no.text() secret = self._secret.text() pro = XunProxy(order_no=order_no, secret=secret) self.log_text_signal.emit('使用付费动态ip代理...') for i in range(thread_num): threading.Thread(target=self.dynamic_proxies_visit, args=(articles, pro.proxy, pro.headers)).start() else: if self._proxies.qsize() == 0: QMessageBox.information(self, MESSAGE_TITLE, '使用免费代理请先获取代理或导入代理!', QMessageBox.Yes) self._is_start = False self._running = False self.log_text_signal.emit('未获取/导入代理, 程序启动失败!') return for i in range(thread_num): threading.Thread(target=self._visit, args=(articles,)).start() self.log_text_signal.emit('开启{}个线程, 开始刷访问量!'.format(str(thread_num))) def get_articles_and_show_to_table(self): """ 获取文章并显示到窗体 :return: """ for rowNum in range(0, self._articles_table.rowCount())[::-1]: # 逆序删除,正序删除会有一些删除不成功 self._articles_table.removeRow(rowNum) self._log_text_browser.append('正在获取博客文章列表...') self._articles = self._get_articles() self._show_article_to_table(self._articles) self._log_text_browser.append('获取博客文章完成...') def _get_articles(self): """ 获取博客文章 :return: """ blog_name = self._blog_name_input.text().strip() art_obj = CSDNArticle(blog_name) articles = art_obj.get_all() return articles def _visit(self, articles): """ 访问文章 :param articles: :return: """ while_num = 
int(self._while_num_input.text()) count = 0 while self._running and count < while_num: self._visit_single(articles) count += 1 self._running = False self._is_start = False def dynamic_proxies_visit(self, articles, proxy, headers): """ 动态代理访问 :param articles :param proxy: :param headers: :return: """ visitor = RequestVisitor() while self._running: for i in range(len(articles)): if not self._running: self.log_text_signal.emit('停止线程:{}...'.format(str(threading.currentThread().ident))) self._is_start = False return is_visit = random.choice([True, False]) if is_visit: raw_read_num = int(self._articles_table.item(i, 2).text()) # 文章列表显示的访问量 read_num = visitor.visit(articles[i]['url'], proxy, headers) if read_num == 0: self.log_text_signal.emit('代理连接失败...') continue self.update_table_view(articles, i, read_num, raw_read_num) self._is_start = False self.log_text_signal.emit('停止线程:{}, 任务已完成...'.format(str(threading.currentThread().ident))) def update_table_view(self, articles, i, read_num, raw_read_num): """ 更新视图 :param articles: :param i: :param read_num: :param raw_read_num: :return: """ if read_num == 0: # 表示访问失败 self.log_text_signal.emit('文章:{}, 代理访问超时...'.format(articles[i]['title'])) return if read_num == raw_read_num: # 重复IP在60秒内访问同一篇文章不会增加访问量 self.log_text_signal.emit('文章:{}, 访问量增加失败...'.format(articles[i]['title'])) return log_text = '文章:{}访问量+1, 当前访问量:{}。'.format(articles[i]['title'], str(read_num)) self.log_text_signal.emit(log_text) self.table_read_num_signal.emit({ 'row': i, 'col': 2, 'text': str(read_num) }) space = int(self._visit_space_input.text()) time.sleep(space) def _is_reset_proxy(self): """ 是否重置代理 :return: """ self._lock.acquire() if not self._proxies: self._get_proxy() self._lock.release() def _visit_single(self, articles): """ 访问文章 :param articles: :return: """ visitor = RequestVisitor() while self._running and not self._proxies.empty(): proxy = self._proxies.get() for i in range(len(articles)): if not self._running: 
self.log_text_signal.emit('停止线程:{}...'.format(str(threading.currentThread().ident))) return self._is_reset_proxy() is_visit = random.choice([True, False]) if is_visit: self._lock.acquire() raw_read_num = int(self._articles_table.item(i, 2).text()) # 文章列表显示的访问量 self._lock.release() read_num = visitor.visit(articles[i]['url'], proxy) self.update_table_view(articles, i, read_num, raw_read_num) def _get_proxy_single(self): """ 获取1页代理 :param page :return: """ self.log_text_signal.emit('正在获取免费代理...') proxy_obj = QuickProxy(2) proxies = proxy_obj.get_all() proxies.reverse() for pro in proxies: self._proxies.put(pro) self.log_text_signal.emit('代理:{}'.format(pro)) self.log_text_signal.emit('获取免费代理完成...') def _get_proxies(self): """ 多线程补充代理, 60秒补充一次 :return: """ while True: proxy_obj = QuickProxy(20) proxies = proxy_obj.get_all() for pro in proxies: self._proxies.put(pro) self.log_text_signal.emit('补充代理:{}' .format(pro)) time.sleep(60) def _update_table_read_num(self, data): """ :return: """ self._articles_table.item(data['row'], data['col']).setText(data['text']) def _show_article_to_table(self, articles): """ 将文章列表展示到表格中 :param articles: :return: """ for art in articles: row = self._articles_table.rowCount() self._articles_table.insertRow(row) items = [art['title'], art['url'], art['read_num']] for j in range(len(items)): item = QTableWidgetItem(str(items[j])) self._articles_table.setItem(row, j, item) def _show_to_log(self, text): """ 显示文字并滚动到最后 :param text: :return: """ self._log_text_browser.append(text) self._log_text_browser.moveCursor(self._log_text_browser.textCursor().End) def stop(self): self._running = False self._is_start = False QMessageBox.information(self, MESSAGE_TITLE, '程序已停止!', QMessageBox.Yes) def destroy(self): """ 销毁应用 :return: """ is_ok = QMessageBox.question(self, MESSAGE_TITLE, '确认退出程序?', QMessageBox.Yes | QMessageBox.No) if is_ok == QMessageBox.Yes: os._exit(5) def select_visit_model(self): """ 选择访问模式 :return: """ pass def import_proxy(self): """ 
导入代理 :return: """ file_select = QFileDialog.getOpenFileName(self, '导入代理', os.path.abspath(os.getcwd()), 'Text Files (*.txt)') if not file_select[0]: self.log_text_signal.emit('未选择文件!') return self.log_text_signal.emit('选择从文件: {}导入!'.format(file_select[0])) try: data = [] with open(file_select[0], 'r', encoding='utf-8-sig') as f: for line in f: proxy = { 'http': 'http://{}'.format(line.strip()), } data.append(proxy) random.shuffle(data) for pro in data: self._proxies.put(pro) self.log_text_signal.emit('导入一条代理:{}'.format(pro)) self.log_text_signal.emit('成功导入{}个代理.'.format(str(self._proxies.qsize()))) except Exception as e: print(e.args) self.log_text_signal.emit('导入失败: ' + str(e.args)) QMessageBox.information(self, MESSAGE_TITLE, '导入代理失败,请检测文件内容格式和编码(utf8)!', QMessageBox.Yes) def start_window(): """ 启动窗体 :return: """ app = QApplication(sys.argv) w = Window() w.setWindowTitle('CSDN刷访问量') w.show() sys.exit(app.exec_()) if __name__ == '__main__': start_window()
utils.py
# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for testing pymongo """ import collections import contextlib import copy import functools import os import re import shutil import sys import threading import time import warnings from collections import defaultdict from functools import partial from bson import json_util, py3compat from bson.objectid import ObjectId from bson.py3compat import iteritems, string_type from bson.son import SON from pymongo import (MongoClient, monitoring, operations, read_preferences) from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener from pymongo.pool import (_CancellationContext, PoolOptions) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern from test import (client_context, db_user, db_pwd) if sys.version_info[0] < 3: # Python 2.7, use our backport. 
from test.barrier import Barrier else: from threading import Barrier IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) class BaseListener(object): def __init__(self): self.events = [] def reset(self): self.events = [] def add_event(self, event): self.events.append(event) def event_count(self, event_type): return len(self.events_by_type(event_type)) def events_by_type(self, event_type): """Return the matching events by event class. event_type can be a single class or a tuple of classes. """ return self.matching(lambda e: isinstance(e, event_type)) def matching(self, matcher): """Return the matching events.""" return [event for event in self.events[:] if matcher(event)] def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" wait_until(lambda: self.event_count(event) >= count, 'find %s %s event(s)' % (count, event)) class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): self.add_event(event) def connection_ready(self, event): self.add_event(event) def connection_closed(self, event): self.add_event(event) def connection_check_out_started(self, event): self.add_event(event) def connection_check_out_failed(self, event): self.add_event(event) def connection_checked_out(self, event): self.add_event(event) def connection_checked_in(self, event): self.add_event(event) def pool_created(self, event): self.add_event(event) def pool_ready(self, event): self.add_event(event) def pool_cleared(self, event): self.add_event(event) def pool_closed(self, event): self.add_event(event) class EventListener(monitoring.CommandListener): def __init__(self): self.results = defaultdict(list) def started(self, event): self.results['started'].append(event) def succeeded(self, event): self.results['succeeded'].append(event) def failed(self, event): self.results['failed'].append(event) def started_command_names(self): """Return list of command names started.""" return [event.command_name for event in 
self.results['started']] def reset(self): """Reset the state of this listener.""" self.results.clear() class WhiteListEventListener(EventListener): def __init__(self, *commands): self.commands = set(commands) super(WhiteListEventListener, self).__init__() def started(self, event): if event.command_name in self.commands: super(WhiteListEventListener, self).started(event) def succeeded(self, event): if event.command_name in self.commands: super(WhiteListEventListener, self).succeeded(event) def failed(self, event): if event.command_name in self.commands: super(WhiteListEventListener, self).failed(event) class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) class _ServerEventListener(object): """Listens to all events.""" def __init__(self): self.results = [] def opened(self, event): self.results.append(event) def description_changed(self, event): self.results.append(event) def closed(self, event): self.results.append(event) def matching(self, matcher): """Return the matching events.""" results = self.results[:] return [event for event in results if matcher(event)] def reset(self): self.results = [] class ServerEventListener(_ServerEventListener, monitoring.ServerListener): """Listens to Server events.""" class ServerAndTopologyEventListener(ServerEventListener, monitoring.TopologyListener): """Listens to Server and Topology events.""" class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" def started(self, event): self.add_event(event) def succeeded(self, 
event): self.add_event(event) def failed(self, event): self.add_event(event) class MockSocketInfo(object): def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False def close_socket(self, reason): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class MockPool(object): def __init__(self, address, options, handshake=True): self.generation = 0 self._lock = threading.Lock() self.opts = options self.operation_count = 0 def get_socket(self, all_credentials, checkout=False): return MockSocketInfo() def return_socket(self, *args, **kwargs): pass def _reset(self): with self._lock: self.generation += 1 def ready(self): pass def reset(self): self._reset() def reset_without_pause(self): self._reset() def close(self): self._reset() def update_is_writable(self, is_writable): pass def remove_stale_sockets(self, *args, **kwargs): pass class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" def __init__(self, data): def convert(v): if isinstance(v, collections.Mapping): return ScenarioDict(v) if isinstance(v, (py3compat.string_type, bytes)): return v if isinstance(v, collections.Sequence): return [convert(item) for item in v] return v dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) def __getitem__(self, item): try: return dict.__getitem__(self, item) except KeyError: # Unlike a defaultdict, don't set the key, just return a dict. 
return ScenarioDict({}) class CompareType(object): """Class that compares equal to any object of the given type.""" def __init__(self, type): self.type = type def __eq__(self, other): return isinstance(other, self.type) def __ne__(self, other): """Needed for Python 2.""" return not self.__eq__(other) class FunctionCallRecorder(object): """Utility class to wrap a callable and record its invocations.""" def __init__(self, function): self._function = function self._call_list = [] def __call__(self, *args, **kwargs): self._call_list.append((args, kwargs)) return self._function(*args, **kwargs) def reset(self): """Wipes the call list.""" self._call_list = [] def call_list(self): """Returns a copy of the call list.""" return self._call_list[:] @property def call_count(self): """Returns the number of times the function has been called.""" return len(self._call_list) class TestCreator(object): """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): """Create a TestCreator object. :Parameters: - `create_test`: callback that returns a test case. The callback must accept the following arguments - a dictionary containing the entire test specification (the `scenario_def`), a dictionary containing the specification for which the test case will be generated (the `test_def`). - `test_class`: the unittest.TestCase class in which to create the test case. - `test_path`: path to the directory containing the JSON files with the test specifications. 
""" self._create_test = create_test self._test_class = test_class self.test_path = test_path def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a test case.""" if 'minServerVersion' in scenario_def: min_ver = tuple( int(elt) for elt in scenario_def['minServerVersion'].split('.')) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) if 'maxServerVersion' in scenario_def: max_ver = tuple( int(elt) for elt in scenario_def['maxServerVersion'].split('.')) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( run_on_req.get('topology', ['single', 'replicaset', 'sharded'])) @staticmethod def min_server_version(run_on_req): version = run_on_req.get('minServerVersion') if version: min_ver = tuple(int(elt) for elt in version.split('.')) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): version = run_on_req.get('maxServerVersion') if version: max_ver = tuple(int(elt) for elt in version.split('.')) return client_context.version <= max_ver return True def should_run_on(self, scenario_def): run_on = scenario_def.get('runOn', []) if not run_on: # Always run these tests. 
return True for req in run_on: if (self.valid_topology(req) and self.min_server_version(req) and self.max_server_version(req)): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( lambda: self.should_run_on(scenario_def), "runOn not satisfied", method) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" return scenario_def['tests'] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: # Use tz_aware=False to match how CodecOptions decodes # dates. opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( json_util.loads(scenario_stream.read(), json_options=opts)) test_type = os.path.splitext(filename)[0] # Construct test from scenario. for test_def in self.tests(scenario_def): test_name = 'test_%s_%s_%s' % ( dirname, test_type.replace("-", "_").replace('.', '_'), str(test_def['description'].replace(" ", "_").replace( '.', '_'))) new_test = self._create_test( scenario_def, test_def, test_name) new_test = self._ensure_min_max_server_version( scenario_def, new_test) new_test = self.ensure_run_on( scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) def _connection_string(h, authenticate): if h.startswith("mongodb://"): return h elif client_context.auth_enabled and authenticate: return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h)) else: return "mongodb://%s" % (str(h),) def _mongo_client(host, port, authenticate=True, directConnection=False, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options = client_context.default_client_options.copy() if client_context.replica_set_name and not directConnection: 
client_options['replicaSet'] = client_context.replica_set_name client_options.update(kwargs) client = MongoClient(_connection_string(host, authenticate), port, **client_options) return client def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) def single_client(h=None, p=None, **kwargs): """Make a direct connection, and authenticate if necessary.""" return _mongo_client(h, p, directConnection=True, **kwargs) def rs_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) def rs_client(h=None, p=None, **kwargs): """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) def rs_or_single_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. """ return _mongo_client(h, p, authenticate=False, **kwargs) def rs_or_single_client(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. """ return _mongo_client(h, p, **kwargs) def ensure_all_connected(client): """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ ismaster = client.admin.command("isMaster") if 'setName' not in ismaster: raise ConfigurationError("cluster is not a replica set") target_host_list = set(ismaster['hosts']) connected_host_list = set([ismaster['me']]) admindb = client.get_database('admin') # Run isMaster until we have connected to each host at least once. 
while connected_host_list != target_host_list: ismaster = admindb.command("isMaster", read_preference=ReadPreference.SECONDARY) connected_host_list.update([ismaster["me"]]) def one(s): """Get one element of a set""" return next(iter(s)) def oid_generated_on_process(oid): """Makes a determination as to whether the given ObjectId was generated by the current process, based on the 5-byte random number in the ObjectId. """ return ObjectId._random() == oid.binary[4:9] def delay(sec): return '''function() { sleep(%f * 1000); return true; }''' % sec def get_command_line(client): command_line = client.admin.command('getCmdLineOpts') assert command_line['ok'] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() def camel_to_upper_camel(camel): return camel[0].upper() + camel[1:] def camel_to_snake_args(arguments): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) arguments[c2s] = arguments.pop(arg_name) return arguments def snake_to_camel(snake): # Regex to convert snake_case to lowerCamelCase. return re.sub(r'_([a-z])', lambda m: m.group(1).upper(), snake) def parse_collection_options(opts): if 'readPreference' in opts: opts['read_preference'] = parse_read_preference( opts.pop('readPreference')) if 'writeConcern' in opts: opts['write_concern'] = WriteConcern( **dict(opts.pop('writeConcern'))) if 'readConcern' in opts: opts['read_concern'] = ReadConcern( **dict(opts.pop('readConcern'))) return opts def server_started_with_option(client, cmdline_opt, config_opt): """Check if the server was started with a particular option. :Parameters: - `cmdline_opt`: The command line option (i.e. --nojournal) - `config_opt`: The config file option (i.e. 
nojournal) """ command_line = get_command_line(client) if 'parsed' in command_line: parsed = command_line['parsed'] if config_opt in parsed: return parsed[config_opt] argv = command_line['argv'] return cmdline_opt in argv def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: msg = e.details.get('errmsg', '') if e.code == 13 or 'unauthorized' in msg or 'login' in msg: # Unauthorized. return True raise # MongoDB >= 2.0 if 'parsed' in command_line: parsed = command_line['parsed'] # MongoDB >= 2.6 if 'security' in parsed: security = parsed['security'] # >= rc3 if 'authorization' in security: return security['authorization'] == 'enabled' # < rc3 return security.get('auth', False) or bool(security.get('keyFile')) return parsed.get('auth', False) or bool(parsed.get('keyFile')) # Legacy argv = command_line['argv'] return '--auth' in argv or '--keyFile' in argv def server_started_with_nojournal(client): command_line = get_command_line(client) # MongoDB 2.6. if 'parsed' in command_line: parsed = command_line['parsed'] if 'storage' in parsed: storage = parsed['storage'] if 'journal' in storage: return not storage['journal']['enabled'] return server_started_with_option(client, '--nojournal', 'nojournal') def server_is_master_with_slave(client): command_line = get_command_line(client) if 'parsed' in command_line: return command_line['parsed'].get('master', False) return '--master' in command_line['argv'] def drop_collections(db): # Drop all non-system collections in this database. 
for coll in db.list_collection_names( filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): """Join threads with a 5-minute timeout, assert joins succeeded""" for t in threads: t.join(300) assert not t.is_alive(), "Thread %s hung" % t def connected(client): """Convenience to wait for a newly-constructed client to connect.""" with warnings.catch_warnings(): # Ignore warning that "ismaster" is always routed to primary even # if client's read preference isn't PRIMARY. warnings.simplefilter("ignore", UserWarning) client.admin.command('ismaster') # Force connection. return client def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value. """ start = time.time() interval = min(float(timeout)/100, 0.1) while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(interval) def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" cmd = SON([('replSetStepDown', 1)]) cmd.update(kwargs) # Unfreeze a secondary to ensure a speedy election. client.admin.command( 'replSetFreeze', 0, read_preference=ReadPreference.SECONDARY) client.admin.command(cmd) def is_mongos(client): res = client.admin.command('ismaster') return res.get('msg', '') == 'isdbgrid' def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a specific class of exception, and not a subclass. 
E.g., check that MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. """ try: fn(*args, **kwargs) except Exception as e: assert e.__class__ == cls, "got %s, expected %s" % ( e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) @contextlib.contextmanager def _ignore_deprecations(): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) yield def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): return wrapped(*args, **kwargs) return wrapper else: return _ignore_deprecations() class DeprecationFilter(object): def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() self.warn_context.__enter__() warnings.simplefilter(action, DeprecationWarning) def stop(self): """Stop filtering deprecations.""" self.warn_context.__exit__() self.warn_context = None def get_pool(client): """Get the standalone, primary, or mongos pool.""" topology = client._get_topology() server = topology.select_server(writable_server_selector) return server.pool def get_pools(client): """Get all pools.""" return [ server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. NTRIALS = 5 NTHREADS = 10 def run_threads(collection, target): """Run a target function in many threads. target is a function taking a Collection and an integer. 
""" threads = [] for i in range(NTHREADS): bound_target = partial(target, collection, i) threads.append(threading.Thread(target=bound_target)) for t in threads: t.start() for t in threads: t.join(60) assert not t.is_alive() @contextlib.contextmanager def frequent_thread_switches(): """Make concurrency bugs more likely to manifest.""" interval = None if not sys.platform.startswith('java'): if hasattr(sys, 'getswitchinterval'): interval = sys.getswitchinterval() sys.setswitchinterval(1e-6) else: interval = sys.getcheckinterval() sys.setcheckinterval(1) try: yield finally: if not sys.platform.startswith('java'): if hasattr(sys, 'setswitchinterval'): sys.setswitchinterval(interval) else: sys.setcheckinterval(interval) def lazy_client_trial(reset, target, test, get_client): """Test concurrent operations on a lazily-connecting client. `reset` takes a collection and resets it for the next trial. `target` takes a lazily-connecting collection and an index from 0 to NTHREADS, and performs some operation, e.g. an insert. `test` takes the lazily-connecting collection and asserts a post-condition to prove `target` succeeded. """ collection = client_context.client.pymongo_test.test with frequent_thread_switches(): for i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test run_threads(lazy_collection, target) test(lazy_collection) def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" # In Python 3.6 importing gevent.socket raises an ImportWarning. 
with warnings.catch_warnings(): warnings.simplefilter("ignore", ImportWarning) try: import socket import gevent.socket return socket.socket is gevent.socket.socket except ImportError: return False def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" try: import threading import eventlet return (threading.current_thread.__module__ == 'eventlet.green.threading') except ImportError: return False def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() def cdecimal_patched(): """Check if Python 2.7 cdecimal patching is active.""" try: import decimal import cdecimal return decimal is cdecimal except ImportError: return False def disable_replication(client): """Disable replication on all secondaries, requires MongoDB 3.2.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', mode='alwaysOn') def enable_replication(client): """Enable replication on all secondaries, requires MongoDB 3.2.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', mode='off') class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" def __init__(self, *args, **kwargs): self.exc = None super(ExceptionCatchingThread, self).__init__(*args, **kwargs) def run(self): try: super(ExceptionCatchingThread, self).run() except BaseException as exc: self.exc = exc raise def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. 
mode_string = pref.get('mode', 'primary') mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) max_staleness = pref.get('maxStalenessSeconds', -1) tag_sets = pref.get('tag_sets') return read_preferences.make_read_preference( mode, tag_sets=tag_sets, max_staleness=max_staleness) def server_name_to_type(name): """Convert a ServerType name to the corresponding value. For SDAM tests.""" # Special case, some tests in the spec include the PossiblePrimary # type, but only single-threaded drivers need that type. We call # possible primaries Unknown. if name == 'PossiblePrimary': return SERVER_TYPE.Unknown return getattr(SERVER_TYPE, name) def cat_files(dest, *sources): """Cat multiple files into dest.""" with open(dest, 'wb') as fdst: for src in sources: with open(src, 'rb') as fsrc: shutil.copyfileobj(fsrc, fdst) @contextlib.contextmanager def assertion_context(msg): """A context manager that adds info to an assertion failure.""" try: yield except AssertionError as exc: msg = '%s (%s)' % (exc, msg) py3compat.reraise(type(exc), msg, sys.exc_info()[2]) def parse_spec_options(opts): if 'readPreference' in opts: opts['read_preference'] = parse_read_preference( opts.pop('readPreference')) if 'writeConcern' in opts: opts['write_concern'] = WriteConcern( **dict(opts.pop('writeConcern'))) if 'readConcern' in opts: opts['read_concern'] = ReadConcern( **dict(opts.pop('readConcern'))) if 'maxTimeMS' in opts: opts['max_time_ms'] = opts.pop('maxTimeMS') if 'maxCommitTimeMS' in opts: opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') if 'hint' in opts: hint = opts.pop('hint') if not isinstance(hint, string_type): hint = list(iteritems(hint)) opts['hint'] = hint # Properly format 'hint' arguments for the Bulk API tests. 
if 'requests' in opts: reqs = opts.pop('requests') for req in reqs: if 'name' in req: # CRUD v2 format args = req.pop('arguments', {}) if 'hint' in args: hint = args.pop('hint') if not isinstance(hint, string_type): hint = list(iteritems(hint)) args['hint'] = hint req['arguments'] = args else: # Unified test format bulk_model, spec = next(iteritems(req)) if 'hint' in spec: hint = spec.pop('hint') if not isinstance(hint, string_type): hint = list(iteritems(hint)) spec['hint'] = hint opts['requests'] = reqs return dict(opts) def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) # PyMongo accepts sort as list of tuples. if arg_name == "sort": sort_dict = arguments[arg_name] arguments[arg_name] = list(iteritems(sort_dict)) # Named "key" instead not fieldName. if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) # Aggregate uses "batchSize", while find uses batch_size. elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate"): continue # Requires boolean returnDocument. elif arg_name == "returnDocument": arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) elif c2s == "requests": # Parse each request into a bulk write model. 
requests = [] for request in arguments["requests"]: if 'name' in request: # CRUD v2 format bulk_model = camel_to_upper_camel(request["name"]) bulk_class = getattr(operations, bulk_model) bulk_arguments = camel_to_snake_args(request["arguments"]) else: # Unified test format bulk_model, spec = next(iteritems(request)) bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) bulk_arguments = camel_to_snake_args(spec) requests.append(bulk_class(**dict(bulk_arguments))) arguments["requests"] = requests elif arg_name == "session": arguments['session'] = entity_map[arguments['session']] elif (opname in ('command', 'run_admin_command') and arg_name == 'command'): # Ensure the first key is the command name. ordered_command = SON([(spec['command_name'], 1)]) ordered_command.update(arguments['command']) arguments['command'] = ordered_command elif opname == 'open_download_stream' and arg_name == 'id': arguments['file_id'] = arguments.pop(arg_name) elif opname != 'find' and c2s == 'max_time_ms': # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. 
arguments['maxTimeMS'] = arguments.pop('max_time_ms') elif opname == 'with_transaction' and arg_name == 'callback': if 'operations' in arguments[arg_name]: # CRUD v2 format callback_ops = arguments[arg_name]['operations'] else: # Unified test format callback_ops = arguments[arg_name] arguments['callback'] = lambda _: with_txn_callback( copy.deepcopy(callback_ops)) elif opname == 'drop_collection' and arg_name == 'collection': arguments['name_or_collection'] = arguments.pop(arg_name) elif opname == 'create_collection' and arg_name == 'collection': arguments['name'] = arguments.pop(arg_name) elif opname == 'create_index' and arg_name == 'keys': arguments['keys'] = list(arguments.pop(arg_name).items()) elif opname == 'drop_index' and arg_name == 'name': arguments['index_or_name'] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name)
dpflow.py
import zmq
import multiprocessing as mp

from .serialize import loads, dumps


def data_sender(id, name, func_iter, *args):
    """Worker loop: generate messages and push them over a ZMQ PUSH socket.

    :param id: integer worker id; forwarded to ``func_iter`` and tagged on
        every message so the receiver can tell workers apart.
    :param name: pipe name; workers connect to ``ipc://@<name>``.
    :param func_iter: callable ``(id, *args) -> iterable`` producing messages.

    Never returns; runs until the process is terminated.
    """
    context = zmq.Context()
    sender = context.socket(zmq.PUSH)
    sender.connect('ipc://@{}'.format(name))
    print('start data provider {}-{}'.format(name, id))
    while True:
        # Re-create the iterator whenever it is exhausted so the stream
        # of messages never ends.
        data_iter = func_iter(id, *args)
        for msg in data_iter:
            sender.send(dumps([id, msg]))


def provider(nr_proc, name, func_iter, *args):
    """Spawn ``nr_proc`` daemon processes each running :func:`data_sender`.

    :return: the list of started ``multiprocessing.Process`` objects so the
        caller can join or terminate them.  (The original returned ``None``;
        returning the handles is backward compatible.)
    """
    procs = []
    for proc_id in range(nr_proc):
        w = mp.Process(target=data_sender,
                       args=(proc_id, name, func_iter) + args)
        # Fix: the original assigned ``w.deamon`` (typo), so the daemon
        # flag was never actually set and worker processes could outlive
        # the parent instead of being reaped with it.
        w.daemon = True
        procs.append(w)
    for p in procs:
        p.start()
    return procs


def receiver(name):
    """Generator yielding messages pushed by the :func:`data_sender` workers.

    Binds a ZMQ PULL socket on ``ipc://@<name>`` and yields each decoded
    message payload (the worker id tag is dropped).
    """
    context = zmq.Context()
    # Renamed from ``receiver`` (which shadowed this function's own name).
    pull_sock = context.socket(zmq.PULL)
    pull_sock.bind('ipc://@{}'.format(name))
    while True:
        worker_id, msg = loads(pull_sock.recv())
        yield msg
loop.py
import time import os from multiprocessing import Process, Queue from cache import set_cache, string_cache, set_global_root from Queue import Empty import secondary_functions from json_socket import make_socket, send_json, get_socket session = {} socket = None procs = None def main(): init_thread() def init_thread(*args, **kw): q = Queue() procs = start_procs(q, *args, **kw) return (q, procs,) def start_procs(q, *args, **kw): global procs procs = create_procs(q, *args, **kw) proc, socket_proc = procs print 'starting procs' proc.start() socket_proc.start() return procs def create_procs(q, *args, **kw): proc = Process(target=init, args=(q, ) + args, kwargs=kw) socket_proc = Process(target=socket_init, args=(q, ) + args, kwargs=kw) return (proc, socket_proc,) def kill_processes(): for proc in procs: print 'Joining', proc proc.join() def socket_init(queue, *a, **kw): global socket print 'socket_init', kw['socket_uri'] socket_uri = kw.get('socket_uri', None) print 'init socket' socket = make_socket(socket_uri) run = 1 while run: print '?', message = socket.recv() if message is not None: print '!', len(message) if message == 'kill': print 'socket kill pill' queue.put_nowait(message) run = 0 continue print 'Finish socket' return socket def init(queue, **kw): ''' A initialization function for anything requiring a first boot. 
''' socket_uri = kw.get('socket_uri', None) cache_path = kw.get('cache_path', None) # global socket # if socket_uri is not None: # print 'Making loop socket' # socket = make_socket(socket_uri) print 'Init main loop' basepath = os.path.abspath(os.path.dirname(__file__)) set_global_root(cache_path) secondary_functions.init(socket_uri=socket_uri) start(queue, socket_uri=socket_uri) def start(queue, socket_uri=None): run = 1 while run: message = None try: message = queue.get_nowait() print '.', except Empty: time.sleep(.2) if message == 'kill': run = 0 if socket_uri is not None: socket = make_socket(socket_uri) socket.send('kill') continue run = step(message) print 'End Stepper' def step(given_data=None): '''continuous call by a loop''' if given_data is not None: print 'Loop react', given_data secondary_functions.apply_to_context(given_data) return 1 if __name__ == '__main__': main()
task.py
import os
import time
import shlex
import threading
import webbrowser
from datetime import datetime

from datmo.core.controller.base import BaseController
from datmo.core.controller.snapshot import SnapshotController
from datmo.core.controller.environment.environment import EnvironmentController
from datmo.core.entity.task import Task
from datmo.core.util.validation import validate
from datmo.core.util.spinner import Spinner
from datmo.core.util.i18n import get as __
from datmo.core.util.exceptions import (
    TaskRunError, RequiredArgumentMissing, ProjectNotInitialized,
    PathDoesNotExist, TaskInteractiveDetachError, TooManyArgumentsFound,
    EntityNotFound, DoesNotExist, TaskNoCommandGiven)


class TaskController(BaseController):
    """TaskController inherits from BaseController and manages business logic
    associated with tasks within the project.

    Attributes
    ----------
    environment : datmo.core.controller.environment.environment.EnvironmentController
        used to create environment if new definition file
    snapshot : datmo.core.controller.snapshot.SnapshotController
        used to create snapshots before and after tasks

    Methods
    -------
    create(dictionary)
        creates a Task object with the permanent parameters
    _run_helper(environment_id, log_filepath, options)
        helper for run to start environment and run with the appropriate
        parameters
    run(self, id, dictionary=None)
        runs the task and tracks the run, logs, inputs and outputs
    list(sort_key=None, sort_order=None)
        lists all tasks within the project given filters
    delete(id)
        deletes the specified task from the project
    """

    def __init__(self):
        super(TaskController, self).__init__()
        self.environment = EnvironmentController()
        self.snapshot = SnapshotController()
        self.spinner = Spinner()
        # A task controller is only usable inside an initialized project.
        if not self.is_initialized:
            raise ProjectNotInitialized(
                __("error", "controller.task.__init__"))

    def create(self):
        """Create Task object

        Returns
        -------
        Task
            object entity for Task (datmo.core.entity.task.Task)
        """
        # Validate Inputs
        create_dict = {
            "model_id": self.model.id,
        }
        try:
            # Create Task; spinner gives the user feedback during the DAL call
            self.spinner.start()
            task_obj = self.dal.task.create(Task(create_dict))
        finally:
            self.spinner.stop()
        return task_obj

    def _run_helper(self, environment_id, options, log_filepath):
        """Run environment with parameters

        Parameters
        ----------
        environment_id : str
            the environment id for definition
        options : dict
            can include the following values:
            command : list
            ports : list
                Here are some example ports used for common applications.
                * 'jupyter notebook' - 8888
                * flask API - 5000
                * tensorboard - 6006
                An example input for the above would be
                ["8888:8888", "5000:5000", "6006:6006"] which maps the
                running host port (right) to that of the environment (left)
            name : str
            volumes : dict
            mem_limit : str
            workspace : str
            detach : bool
            stdin_open : bool
            tty : bool
        log_filepath : str
            absolute filepath to the log file

        Returns
        -------
        return_code : int
            system return code of the environment that was run
        run_id : str
            id of the environment run (different from environment id)
        logs : str
            output logs from the run
        """
        # Run container with options provided
        run_options = {
            "command": options.get('command', None),
            "ports": options.get('ports', None),
            "name": options.get('name', None),
            "volumes": options.get('volumes', None),
            "mem_limit": options.get('mem_limit', None),
            "gpu": options.get('gpu', False),
            "detach": options.get('detach', False),
            "stdin_open": options.get('stdin_open', False),
            "tty": options.get('tty', False),
            "api": False,
        }
        workspace = options.get('workspace', None)
        self.environment.build(environment_id, workspace)

        # Start a daemon thread to open the workspace in a web browser
        name = options.get('name', None)
        if workspace is not None:
            thread = threading.Thread(
                target=self._open_workspace, args=(name, workspace))
            thread.daemon = True  # Daemonize thread so it dies with the main process
            thread.start()

        # Start the execution
        # Run container with environment
        return_code, run_id, logs = self.environment.run(
            environment_id, run_options, log_filepath)
        return return_code, run_id, logs

    def _open_workspace(self, name, workspace):
        """Run a daemon to open workspace

        :param name: name of the environment being run
        :param workspace: name of the workspace
        :return: result of webbrowser.open (bool)
        """
        workspace_url = self.environment_driver.extract_workspace_url(
            name, workspace)
        # new=2 opens in a new browser tab where possible
        result = webbrowser.open(workspace_url, new=2)
        return result

    def _parse_logs_for_results(self, logs):
        """Parse log string to extract results and return dictionary.

        The format of the log line must be "key:value", whitespace will not
        matter and if there are more than 2 items found when split on ":",
        it will not log this as a key/value result

        Note
        ----
        If the same key is found multiple times in the logs, the last
        occurring one will be the one that is saved.

        Parameters
        ----------
        logs : str
            raw string value of output logs

        Returns
        -------
        dict or None
            dictionary to represent results from task
        """
        results = {}
        for line in logs.split("\n"):
            split_line = line.split(":")
            # Only exact "key:value" lines count; lines with extra ":" are skipped
            if len(split_line) == 2:
                results[split_line[0].strip()] = split_line[1].strip()
        if results == {}:
            results = None
        return results

    @staticmethod
    def _update_environment_run_options(environment_run_option,
                                        data_file_path_map,
                                        data_directory_path_map):
        """Update environment run option dictionary with data file and
        directory mapping and return dictionary.

        Parameters
        ----------
        data_file_path_map : list
            list of tuple containing source file absolute path and
            destination file name
        data_directory_path_map : list
            list of tuple containing source directory absolute path and
            destination directory

        Returns
        -------
        dict
            updated dictionary having the environment run options
        """
        if data_file_path_map:
            # Mount the directory for file path
            if len(data_file_path_map) > 1:
                raise TaskRunError(
                    __("error", "cli.run.run.data.files.limit_exceeded"))
            # select only one tuple, the latest file
            data_tuple = data_file_path_map[-1]
            data_file_src_abs_path, data_dst_rel_path = data_tuple
            # check if the file exists
            if not os.path.isfile(data_file_src_abs_path):
                raise TaskRunError(
                    __("error", "cli.run.run.data.src_file.dne",
                       data_file_src_abs_path))
            data_file_dirname = os.path.dirname(data_file_src_abs_path)
            # check if the directory exists
            if not os.path.isdir(data_file_dirname):
                raise TaskRunError(
                    __("error", "cli.run.run.data.src_dir.dne",
                       data_file_dirname))
            # NOTE(review): the whole containing directory is mounted at
            # /data/, not just the single file.
            data_volume = {
                os.path.dirname(data_file_src_abs_path): {
                    'bind': '/data/',
                    'mode': 'rw'
                }
            }
            if environment_run_option["volumes"]:
                environment_run_option["volumes"].update(data_volume)
            else:
                environment_run_option["volumes"] = data_volume

        if data_directory_path_map:
            # Mount the directory for data
            for data_tuple in data_directory_path_map:
                data_src_abs_path, data_dst_rel_path = data_tuple
                if not os.path.isdir(data_src_abs_path):
                    raise TaskRunError(
                        __("error", "cli.run.run.data.src_dir.dne",
                           data_src_abs_path))
                data_volume = {
                    data_src_abs_path: {
                        'bind': '/data/%s' % data_dst_rel_path,
                        'mode': 'rw'
                    }
                }
                if environment_run_option["volumes"]:
                    environment_run_option["volumes"].update(data_volume)
                else:
                    environment_run_option["volumes"] = data_volume
        return environment_run_option

    def run(self, task_id, snapshot_dict=None, task_dict=None):
        """Run a task with parameters. If dictionary specified, create a new
        task with new run parameters.

        Snapshot objects are created before and after the task to keep track
        of the state. During the run, you can access task outputs using
        environment variable DATMO_TASK_DIR or `/task` which points to
        location for the task files. Create config.json, stats.json and any
        weights or any file such as graphs and visualizations within that
        directory for quick access

        Parameters
        ----------
        task_id : str
            id for the task you would like to run
        snapshot_dict : dict
            set of parameters to create a snapshot (see SnapshotController
            for details. default is None, which means dictionary with
            `visible` False will be added to hide auto-generated snapshot)
            NOTE: `visible` False will always be False regardless of whether
            the user provides another value for `visible`.
        task_dict : dict
            set of parameters to characterize the task run (default is None,
            which translate to {}, see datmo.core.entity.task.Task for more
            details on inputs)

        Returns
        -------
        Task
            the Task object which completed its run with updated parameters

        Raises
        ------
        TaskRunError
            If there is any error in creating files for the task or
            downstream errors
        """
        # Ensure visible=False is present in the snapshot dictionary
        if not snapshot_dict:
            snapshot_dict = {"visible": False}
        else:
            snapshot_dict['visible'] = False

        if not task_dict:
            task_dict = {}

        # Obtain Task to run
        task_obj = self.dal.task.get_by_id(task_id)

        # Ensure that at least 1 of command, command_list, or interactive
        # is present in task_dict (or already stored on the task)
        important_task_args = ["command", "command_list", "interactive"]
        if not task_dict.get('command', task_obj.command) and \
            not task_dict.get('command_list', task_obj.command_list) and \
                not task_dict.get('interactive', task_obj.interactive):
            raise RequiredArgumentMissing(
                __("error", "controller.task.run.arg",
                   " or ".join(important_task_args)))

        # A non-None status means the task was already started once
        if task_obj.status is None:
            task_obj.status = "RUNNING"
        else:
            raise TaskRunError(
                __("error", "cli.run.run.already_running", task_obj.id))

        # Create Task directory for user during run
        task_dirpath = os.path.join(".datmo", "tasks", task_obj.id)
        try:
            _ = self.file_driver.create(task_dirpath, directory=True)
        except Exception:
            raise TaskRunError(
                __("error", "controller.task.run", task_dirpath))

        # Create the before snapshot prior to execution
        before_snapshot_dict = snapshot_dict.copy()
        before_snapshot_dict[
            'message'] = "autogenerated snapshot created before task %s is run" % task_obj.id
        before_snapshot_obj = self.snapshot.create(before_snapshot_dict)

        # Update the task with pre-execution parameters, prefer list first
        # then look for string command.
        # List command will overwrite a string command if given
        if task_dict.get('command_list', task_obj.command_list):
            task_dict['command'] = " ".join(
                task_dict.get('command_list', task_obj.command_list))
        else:
            if task_dict.get('command', task_obj.command):
                task_dict['command_list'] = shlex.split(
                    task_dict.get('command', task_obj.command))
            elif not task_dict.get('interactive', task_obj.interactive):
                # If it's not interactive then there is not expected task
                raise TaskNoCommandGiven()

        validate("create_task", task_dict)
        task_obj = self.dal.task.update({
            "id": task_obj.id,
            "before_snapshot_id":
                task_dict.get('before_snapshot_id', before_snapshot_obj.id),
            "command": task_dict.get('command', task_obj.command),
            "command_list":
                task_dict.get('command_list', task_obj.command_list),
            "gpu": task_dict.get('gpu', False),
            "mem_limit": task_dict.get('mem_limit', None),
            "workspace": task_dict.get('workspace', None),
            "data_file_path_map":
                task_dict.get('data_file_path_map',
                              task_obj.data_file_path_map),
            "data_directory_path_map":
                task_dict.get('data_directory_path_map',
                              task_obj.data_directory_path_map),
            "interactive": task_dict.get('interactive', task_obj.interactive),
            "detach": task_dict.get('detach', task_obj.detach),
            "ports": task_dict.get('ports', task_obj.ports),
            "task_dirpath": task_dict.get('task_dirpath', task_dirpath),
            "log_filepath":
                task_dict.get('log_filepath',
                              os.path.join(task_dirpath, "task.log")),
            "start_time": task_dict.get('start_time', datetime.utcnow()),
            "status": task_obj.status
        })

        # Copy over files from the before_snapshot file collection to task dir
        file_collection_obj = \
            self.dal.file_collection.get_by_id(before_snapshot_obj.file_collection_id)
        self.file_driver.copytree(
            os.path.join(self.home, file_collection_obj.path),
            os.path.join(self.home, task_obj.task_dirpath))

        return_code, run_id, logs = 0, None, None
        try:
            # Set the parameters set in the task
            if task_obj.detach and task_obj.interactive:
                raise TaskInteractiveDetachError(
                    __("error", "controller.task.run.args.detach.interactive"))

            environment_run_options = {
                "command": task_obj.command_list,
                "ports": [] if task_obj.ports is None else task_obj.ports,
                "name": "datmo-task-" + self.model.id + "-" + task_obj.id,
                "volumes": {},
                "mem_limit": task_obj.mem_limit,
                "workspace": task_obj.workspace,
                "gpu": task_obj.gpu,
                "detach": task_obj.detach,
                "stdin_open": task_obj.interactive,
                "tty": task_obj.interactive,
                "api": False
            }
            # mount the data volume
            environment_run_options = self._update_environment_run_options(
                environment_run_options,
                data_file_path_map=task_obj.data_file_path_map,
                data_directory_path_map=task_obj.data_directory_path_map)
            # mount the project and task folder
            environment_run_options["volumes"].update({
                os.path.join(self.home, task_obj.task_dirpath): {
                    'bind': '/task/',
                    'mode': 'rw'
                },
                self.home: {
                    'bind': '/home/',
                    'mode': 'rw'
                }
            })

            # Run environment via the helper function
            return_code, run_id, logs = \
                self._run_helper(before_snapshot_obj.environment_id,
                                 environment_run_options,
                                 os.path.join(self.home, task_obj.log_filepath))
        except Exception as e:
            return_code = 1
            # NOTE(review): this line is broken on several counts -- the
            # format string "%" has no conversion specifier, `logs` may
            # still be None here (TypeError on +=), and `e.message` is
            # Python-2-only. Likely intent:
            #   logs = (logs or "") + "Error running task: %s" % e
            logs += "Error running task: %" % e.message
        finally:
            # Create the after snapshot after execution is completed with
            # new paths
            after_snapshot_dict = snapshot_dict.copy()
            after_snapshot_dict[
                'message'] = "autogenerated snapshot created after task %s is run" % task_obj.id

            # Add in absolute paths from running task directory
            absolute_task_dir_path = os.path.join(self.home,
                                                  task_obj.task_dirpath)
            absolute_paths = []
            for item in os.listdir(absolute_task_dir_path):
                path = os.path.join(absolute_task_dir_path, item)
                if os.path.isfile(path) or os.path.isdir(path):
                    absolute_paths.append(path)
            after_snapshot_dict.update({
                "paths": absolute_paths,
                "environment_id": before_snapshot_obj.environment_id,
            })
            after_snapshot_obj = self.snapshot.create(after_snapshot_dict)
            # (optional) Remove temporary task directory path
            # Update the task with post-execution parameters
            end_time = datetime.utcnow()
            duration = (end_time - task_obj.start_time).total_seconds()
            update_task_dict = {
                "id": task_obj.id,
                "after_snapshot_id": after_snapshot_obj.id,
                "logs": logs,
                "status": "SUCCESS" if return_code == 0 else "FAILED",
                # "results": task_obj.results,  # TODO: update during run
                "end_time": end_time,
                "duration": duration
            }
            if logs is not None:
                update_task_dict["results"] = self._parse_logs_for_results(
                    logs)
                if update_task_dict["results"] is not None:
                    snapshot_update_dict = dict({'id': after_snapshot_obj.id})
                    if after_snapshot_obj.stats:
                        after_snapshot_obj.stats.update(
                            update_task_dict["results"])
                    else:
                        after_snapshot_obj.stats = update_task_dict["results"]
                    snapshot_update_dict[
                        "stats"] = after_snapshot_obj.stats.copy()
                    self.dal.snapshot.update(snapshot_update_dict)
            if run_id is not None:
                update_task_dict["run_id"] = run_id
            # NOTE(review): returning from inside `finally` silently
            # swallows any exception raised above that the `except` clause
            # re-raised or that the finally body itself would propagate.
            return self.dal.task.update(update_task_dict)

    def list(self, sort_key=None, sort_order=None):
        # List all tasks in the project (no filtering beyond sort options).
        query = {}
        return self.dal.task.query(query, sort_key, sort_order)

    def get(self, task_id):
        """Get task object and return

        Parameters
        ----------
        task_id : str
            id for the task you would like to get

        Returns
        -------
        datmo.core.entity.task.Task
            core task object

        Raises
        ------
        DoesNotExist
            task does not exist
        """
        try:
            return self.dal.task.get_by_id(task_id)
        except EntityNotFound:
            raise DoesNotExist()

    def get_files(self, task_id, mode="r"):
        """Get list of file objects for task id.

        It will look in the following areas in the following order
        1) look in the after snapshot for file collection
        2) look in the running task file collection
        3) look in the before snapshot for file collection

        Parameters
        ----------
        task_id : str
            id for the task you would like to get file objects for
        mode : str
            file open mode (default is "r" to open file for read)

        Returns
        -------
        list
            list of python file objects

        Raises
        ------
        DoesNotExist
            task object does not exist
        PathDoesNotExist
            no file objects exist for the task
        """
        try:
            task_obj = self.dal.task.get_by_id(task_id)
        except EntityNotFound:
            raise DoesNotExist()
        if task_obj.after_snapshot_id:
            # perform number 1) and return file list
            return self.snapshot.get_files(
                task_obj.after_snapshot_id, mode=mode)
        elif task_obj.task_dirpath:
            # perform number 2) and return file list
            return self.file_driver.get(
                task_obj.task_dirpath, mode=mode, directory=True)
        elif task_obj.before_snapshot_id:
            # perform number 3) and return file list
            return self.snapshot.get_files(
                task_obj.before_snapshot_id, mode=mode)
        else:
            # Error because the task does not have any files associated
            # with it
            raise PathDoesNotExist()

    def update(self,
               task_id,
               workspace=None,
               command=None,
               command_list=None,
               interactive=False):
        """Update the task metadata"""
        if not task_id:
            # NOTE(review): reuses the delete error key
            # ("controller.task.delete.arg") -- confirm a dedicated
            # update key was not intended.
            raise RequiredArgumentMissing(
                __("error", "controller.task.delete.arg", "id"))
        # Keep command and command_list in sync, list form wins
        if command_list:
            command = " ".join(command_list)
        elif command:
            command_list = shlex.split(command)
        validate(
            "update_task", {
                "workspace": workspace,
                "command": command,
                "command_list": command_list,
                "interactive": interactive
            })
        update_task_input_dict = {'id': task_id}
        if workspace is not None:
            update_task_input_dict['workspace'] = workspace
        if command is not None:
            update_task_input_dict['command'] = command
        if command_list is not None:
            update_task_input_dict['command_list'] = command_list
        if interactive:
            update_task_input_dict['interactive'] = interactive
        return self.dal.task.update(update_task_input_dict)

    def delete(self, task_id):
        # Stop any running environment for the task, then remove the task
        # record. Returns True only if both operations succeed.
        if not task_id:
            raise RequiredArgumentMissing(
                __("error", "controller.task.delete.arg", "id"))
        stopped_success = self.stop(task_id)
        delete_task_success = self.dal.task.delete(task_id)
        return stopped_success and delete_task_success

    def stop(self, task_id=None, all=False, status="STOPPED"):
        """Stop and remove run for the task and update task object statuses

        Parameters
        ----------
        task_id : str, optional
            id for the task you would like to stop
        all : bool, optional
            if specified, will stop all tasks within project

        Returns
        -------
        return_code : bool
            system return code of the stop

        Raises
        ------
        RequiredArgumentMissing
        TooManyArgumentsFound
        """
        if task_id is None and all is False:
            raise RequiredArgumentMissing(
                __("error", "controller.task.stop.arg.missing", "id"))
        if task_id and all:
            raise TooManyArgumentsFound()
        if task_id:
            try:
                task_obj = self.get(task_id)
            except DoesNotExist:
                # Retry once; the database write may not be visible yet
                time.sleep(1)
                task_obj = self.get(task_id)
            task_match_string = "datmo-task-" + self.model.id + "-" + task_id
            # Get the environment id associated with the task
            kwargs = {'match_string': task_match_string}
            # Get the environment from the task
            before_snapshot_id = task_obj.before_snapshot_id
            after_snapshot_id = task_obj.after_snapshot_id
            if not before_snapshot_id and not after_snapshot_id:
                # TODO: remove...for now database may not be in sync.
                # no task that has run can have NO before_snapshot_id
                time.sleep(1)
                task_obj = self.get(task_id)
            # TODO: fix stop function to handle stopping containers run from specific environment_ids
            # if after_snapshot_id:
            #     after_snapshot_obj = self.snapshot.get(after_snapshot_id)
            #     kwargs['environment_id'] = after_snapshot_obj.environment_id
            # if not after_snapshot_id and before_snapshot_id:
            #     before_snapshot_obj = self.snapshot.get(before_snapshot_id)
            #     kwargs['environment_id'] = before_snapshot_obj.environment_id
            return_code = self.environment.stop(**kwargs)
        if all:
            return_code = self.environment.stop(all=True)

        # Set stopped task statuses to STOPPED if return success
        if return_code:
            if task_id:
                self.dal.task.update({"id": task_id, "status": status})
            if all:
                task_objs = self.dal.task.query({})
                for task_obj in task_objs:
                    self.dal.task.update({
                        "id": task_obj.id,
                        "status": status
                    })
        return return_code
mainwindow.py
from __future__ import print_function from __future__ import unicode_literals from gi.repository import Gtk, Gdk, GLib, Pango from pkg_resources import resource_filename # @UnresolvedImport import logging from . import cli from kalite_gtk import validators from kalite_gtk.exceptions import ValidationError logger = logging.getLogger(__name__) def run_async(func): """ http://code.activestate.com/recipes/576684-simple-threading-decorator/ run_async(func) function decorator, intended to make "func" run in a separate thread (asynchronously). Returns the created Thread object E.g.: @run_async def task1(): do_something @run_async def task2(): do_something_too """ from threading import Thread from functools import wraps @wraps(func) def async_func(*args, **kwargs): func_hl = Thread(target=func, args=args, kwargs=kwargs) func_hl.start() # Never return anything, idle_add will think it should re-run the # function because it's a non-False value. return None return async_func class Handler: def __init__(self, mainwindow): # Store new settings here and use for sync / reset # This only includes valid settings self.unsaved_settings = {} self.mainwindow = mainwindow def on_delete_window(self, *args): Gtk.main_quit(*args) @run_async def on_start_button_clicked(self, button): self.log_message("Starting KA Lite...\n") GLib.idle_add(button.set_sensitive, False) for stdout, stderr, returncode in cli.start(): if stdout: self.log_message(stdout) if returncode == 0: self.log_message("KA Lite started!\n") elif stderr: self.log_message(stderr) GLib.idle_add(button.set_sensitive, True) GLib.idle_add(self.mainwindow.update_status) @run_async def on_stop_button_clicked(self, button): GLib.idle_add(button.set_sensitive, False) self.log_message("Stopping KA Lite...\n") for stdout, stderr, returncode in cli.stop(): if stdout: self.log_message(stdout) if returncode: self.log_message("Failed to stop\n") if stderr: self.log_message(stderr) GLib.idle_add(button.set_sensitive, True) 
GLib.idle_add(self.mainwindow.update_status) @run_async def on_diagnose_button_clicked(self, button): GLib.idle_add(button.set_sensitive, False) start_iter = self.mainwindow.diagnostics.get_start_iter() end_iter = self.mainwindow.diagnostics.get_end_iter() GLib.idle_add(lambda: self.mainwindow.diagnostics.delete(start_iter, end_iter)) stdout, stderr, returncode = cli.diagnose() if stdout: GLib.idle_add(self.mainwindow.diagnostics_message, stdout) if stderr: GLib.idle_add(self.mainwindow.diagnostics_message, stderr) if returncode: GLib.idle_add(self.mainwindow.set_status, "Failed to diagnose!") GLib.idle_add(button.set_sensitive, True) @run_async def on_startup_service_button_clicked(self, button): GLib.idle_add(button.set_sensitive, False) if cli.is_installed(): self.log_message("Removing startup service\n") stdout, stderr, returncode = cli.remove() if stdout: self.log_message(stdout) if stderr: self.log_message(stderr) if returncode: self.log_message("Failed to remove startup service\n") self.log_message("Removed!\n") else: self.log_message("Installing startup service\n") stdout, stderr, returncode = cli.install() if stdout: self.log_message(stdout) if stderr: self.log_message(stderr) if returncode: self.log_message("Failed to install startup service\n") self.log_message("Installed!\n") GLib.idle_add(self.mainwindow.set_from_settngs) GLib.idle_add(button.set_sensitive, True) def on_username_entry_changed(self, entry): value = entry.get_text() if not value: self.mainwindow.default_user_radio_button.set_active(True) return self.mainwindow.username_radiobutton.set_active(True) try: value = validators.username(value) self.unsaved_settings['user'] = value self.settings_changed() except ValidationError: self.mainwindow.settings_feedback_label.set_label( 'Username invalid' ) @run_async def on_save_and_restart_button_clicked(self, button): cli.save_settings() GLib.idle_add(button.set_sensitive, False) GLib.idle_add( self.mainwindow.settings_feedback_label.set_label, 
'Settings saved, restarting server...' ) self.log_message("Restarting KA Lite...\n") GLib.idle_add(self.mainwindow.start_button.set_sensitive, False) for stdout, stderr, returncode in cli.start(): if stdout: self.log_message(stdout) if returncode == 0: self.log_message("KA Lite restarted!\n") elif stderr: self.log_message(stderr) GLib.idle_add(button.set_sensitive, False) def settings_changed(self): """ We should make individual handlers for widgets, but this is easier... """ cli.settings.update(self.unsaved_settings) cli.save_settings() self.mainwindow.settings_feedback_label.set_label( 'Settings OK - they will be saved and take effect when you restart the server!' ) def log_message(self, msg): """Logs a message using idle callback""" GLib.idle_add(self.mainwindow.log_message, msg) class MainWindow: def __init__(self): self.builder = Gtk.Builder() glade_file = resource_filename(__name__, "glade/mainwindow.glade") self.builder.add_from_file(glade_file) # Save glade builder XML tree objects to object properties all in # one place so we don't get confused. Don't call get_object other places # PLEASE. 
self.window = self.builder.get_object('mainwindow') self.log_textview = self.builder.get_object('log_textview') self.diagnose_textview = self.builder.get_object('diagnose_textview') self.diagnostics = self.builder.get_object('diagnostics') self.status_entry = self.builder.get_object('status_label') self.default_user_radio_button = self.builder.get_object('radiobutton_user_default') self.kalite_command_entry = self.builder.get_object('kalite_command_entry') self.port_spinbutton = self.builder.get_object('port_spinbutton') self.content_root_filechooserbutton = self.builder.get_object('content_root_filechooserbutton') self.username_entry = self.builder.get_object('username_entry') self.username_radiobutton = self.builder.get_object('radiobutton_username') self.log = self.builder.get_object('log') self.start_button = self.builder.get_object('start_button') self.stop_button = self.builder.get_object('stop_button') self.diagnose_button = self.builder.get_object('diagnose_button') self.startup_service_button = self.builder.get_object('startup_service_button') self.settings_feedback_label = self.builder.get_object('settings_feedback_label') self.start_stop_instructions_label = self.builder.get_object('start_stop_instructions_label') self.save_and_restart_button = self.builder.get_object('save_and_restart_button') # Save old label so we can continue to replace text self.start_stop_instructions_label_original_text = self.start_stop_instructions_label.get_label() # Auto-connect handlers defined in mainwindow.glade self.builder.connect_signals(Handler(self)) # Style the log like a terminal self.log_textview.override_font( Pango.font_description_from_string('DejaVu Sans Mono 9') ) self.log_textview.override_background_color( Gtk.StateFlags.NORMAL, Gdk.RGBA(0, 0, 0, 1)) self.log_textview.override_color( Gtk.StateFlags.NORMAL, Gdk.RGBA(1, 1, 1, 1)) self.log_textview.override_background_color( Gtk.StateFlags.SELECTED, Gdk.RGBA(0.7, 1, 0.5, 1)) # Style the diagnose view like a 
terminal self.diagnose_textview.override_font( Pango.font_description_from_string('DejaVu Sans Mono 9') ) self.diagnose_textview.override_background_color( Gtk.StateFlags.NORMAL, Gdk.RGBA(0, 0, 0, 1)) self.diagnose_textview.override_color( Gtk.StateFlags.NORMAL, Gdk.RGBA(1, 1, 1, 1)) self.diagnose_textview.override_background_color( Gtk.StateFlags.SELECTED, Gdk.RGBA(0.7, 1, 0.5, 1)) # Load settings into widgets self.set_from_settings() # Show widgets self.window.show_all() # Update status bar GLib.idle_add(self.update_status) GLib.timeout_add(60 * 1000, lambda: self.update_status or True) def diagnostics_message(self, msg): self.diagnostics.insert_at_cursor(msg) def log_message(self, msg): self.log.insert_at_cursor(msg) def set_from_settings(self): # Insert username of currently running user label = self.start_stop_instructions_label_original_text.replace( '{username}', cli.settings['user'] ) self.start_stop_instructions_label.set_label(label) label = self.default_user_radio_button.get_label() label = label.replace('{default}', cli.DEFAULT_USER) self.default_user_radio_button.set_label(label) self.kalite_command_entry.set_text(cli.settings['command']) self.port_spinbutton.set_value(int(cli.settings['port'])) self.content_root_filechooserbutton.set_filename(cli.settings['content_root']) if cli.DEFAULT_USER != cli.settings['user']: self.username_entry.set_text(cli.settings['user']) self.username_radiobutton.set_active(True) self.default_user_radio_button.set_active(False) else: self.username_radiobutton.set_active(False) self.default_user_radio_button.set_active(True) self.startup_service_button.set_sensitive(cli.has_init_d()) if cli.has_init_d(): if cli.is_installed(): self.startup_service_button.set_label("Remove system service") else: self.startup_service_button.set_label("Install system service") @run_async def update_status(self): GLib.idle_add(self.set_status, "Updating status...") GLib.idle_add(self.set_status, "Server status: " + (cli.status() or "Error 
fetching status").split("\n")[0]) def set_status(self, status): self.status_entry.set_label(status) if __name__ == "__main__": win = MainWindow() Gtk.main()
Combined_Client.py
import threading from threading import Thread import io import socket import struct import time import picamera import serial from time import sleep # Set server IP address & Port host = "192.168.0.100" port = 8000 # Thread to handle steering commands def steer(): global command_client global ArduinoPort send_inst = True try: while send_inst: sleep(0.1) # Read command received from server recvCommand = command_client.recv(1024) # Quit if the received command is "q" or empty string if (recvCommand=="q" or recvCommand==""): command_client.close print 'Exit' send_inst = False ArduinoPort.write("0") ArduinoPort.close() break; else: # Send the received control command to the Arduino print (recvCommand) ArduinoPort.write(recvCommand.encode()) except: # Stop and reset car if an error occurs recvCommand = "00000000" ArduinoPort.write(recvCommand.encode()) print "Error! Connection lost!" # Thread to handle video transmission def VideoStream(): global command_client global ArduinoPort arduino_connected = 0 #print "starting thread" # Set up command client while arduino_connected == 0: try: ArduinoPort = serial.Serial('/dev/ttyACM0', 115200, timeout=1) arduino_connected = 1 except: arduino_connected = 0 print "Please check USB connection to Arduino" print "" print "Retrying Arduino connection..." 
# Wait 2 seconds before retrying connection sleep(2) #Initialise everything print "Connecting to command server" command_client = socket.socket() #Create a socket object command_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) command_client.connect((host, port)) #Bind to the port print "Should be connected to command server" print "" #Start thread to handle control requests Thread(target = steer).start() # Wait 0.5 seconds to allow video server to initialise sleep(0.5) # Set up video client print "Connecting to video server" video_socket = socket.socket() video_socket.connect((host, port+1)) # Make a file-like object out of the connection connection = video_socket.makefile('wb') print "Should be connected to video server" print "" try: camera = picamera.PiCamera() camera.resolution = (320, 240) #Set to true if camera is flipped vertically camera.vflip = True camera.hflip = True # Start a preview and let the camera warm up for 2 seconds #camera.start_preview() time.sleep(2) # Note the start time and construct a stream to hold image data # temporarily (we could write it directly to connection but in this # case we want to find out the size of each capture first to keep # our protocol simple) start = time.time() stream = io.BytesIO() for foo in camera.capture_continuous(stream, 'jpeg',use_video_port = True): # Write the length of the capture to the stream and flush to # ensure it actually gets sent connection.write(struct.pack('<L', stream.tell())) connection.flush() # Rewind the stream and send the image data over the wire stream.seek(0) connection.write(stream.read()) # If we've been capturing for more than 30 seconds, quit #if time.time() - start > 30: # break # Reset the stream for the next capture stream.seek(0) stream.truncate() # Write a length of zero to the stream to signal we're done connection.write(struct.pack('<L', 0)) finally: recvCommand = "00000000" ArduinoPort.write(recvCommand.encode()) print "Error! Connection lost!" 
connection.close() video_socket.close() #Start threads Thread(target = VideoStream).start()
train.py
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from multiprocessing import Process
from PIL import ImageFile
import tensorflow as tf
import tensorflow_addons as tfa  # NOTE(review): unused here; presumably needed by `models` — confirm
import datetime
import time
import csv
import os
import argparse

import models

# Let PIL load partially written / truncated image files instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default='Flavia',
                    help="is Flavia, Flower, Leafsnap, Cifar or Swedish")
parser.add_argument("--models", type=str, default='EfficientNetB0',
                    help="is EfficientNetB0~B3, VGG16, ResNetV2101, InceptionV3, "
                         "DenseNet169, NASNetMobile or MobileNetV3")
parser.add_argument("--epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("--lr", type=float, default=5e-5, help="Adam: learning rate")
parser.add_argument("--af", type=str, default='swish', help="is relu, swish or hswish")
parser.add_argument("--at", type=str, default='se', help="is se or eca")
parser.add_argument("--dirs", type=str, default='', help="is model data path")
parser.add_argument("--load", type=int, default=0, help="number of models")
parser.add_argument("--batch_size", type=int, default=32, help="size of the batches")
parser.add_argument("--img_size", type=int, default=224, help="size of each image dimension")
parser.add_argument("--num", type=int, default=1, help="number of running train.py")
opt = parser.parse_args()

# Dataset directory names; the numeric suffix encodes the class count
# (e.g. 'Flavia-32' -> 32 classes).  Checked in this order.
DATASET_DIRS = ['Flavia-32', 'Flower-102', 'Leafsnap-184', 'Swedish-15', 'Cifar-100']


def trainmodel():
    """Build, train and evaluate one model according to the global ``opt``.

    Side effects: writes TensorBoard logs, one weight checkpoint per epoch,
    a CSV metrics history, and the best model, all under
    ``logs/<model>/<timestamp>/`` (stored back into ``opt.dirs``).
    """
    base = 'D:/deeplearning/datasets/imageclassification/'
    # NOTE(review): ``opt.data in name`` is a substring test (e.g. 'Leaf'
    # matches 'Leafsnap-184'); kept on purpose since times() passes short
    # names such as 'Leaf'.  Unknown names fall back to Flavia, as before.
    for name in DATASET_DIRS:
        if opt.data in name:
            path = base + name
            break
    else:
        path = base + 'Flavia-32'
    # Class count comes from the directory suffix after the last '-'.
    classes = int(path.split('-')[-1].split('/')[0])
    print(opt)

    if 'EfficientNet' in opt.models:
        model = models.myEfficientNet(model_str=opt.models, attention=opt.at,
                                      activation=opt.af,
                                      input_shape=(opt.img_size, opt.img_size, 3),
                                      classes=classes)
    else:
        model = models.mymodels(model_str=opt.models,
                                input_shape=(opt.img_size, opt.img_size, 3),
                                classes=classes)

    metrics = ['accuracy',
               tf.keras.metrics.Precision(name='Precision'),
               tf.keras.metrics.Recall(name='Recall')]
    model.compile(optimizer=Adam(opt.lr), loss='categorical_crossentropy',
                  metrics=metrics)

    # Data pipelines: augment the training set, only rescale the test set.
    train_datagen = ImageDataGenerator(rescale=1. / 255., rotation_range=40,
                                       width_shift_range=0.2, height_shift_range=0.2,
                                       shear_range=0.2, zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1.0 / 255.)
    train_generator = train_datagen.flow_from_directory(
        "{}/train/".format(path), batch_size=opt.batch_size,
        class_mode='categorical', target_size=(opt.img_size, opt.img_size))
    test_generator = test_datagen.flow_from_directory(
        "{}/test/".format(path), batch_size=opt.batch_size,
        class_mode='categorical', target_size=(opt.img_size, opt.img_size))

    # Callbacks: TensorBoard logs, a weight checkpoint every epoch (needed by
    # history_csv below), and LR reduction when *training* accuracy plateaus.
    timenow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    opt.dirs = 'logs/{}/{}/'.format(opt.models, timenow)
    os.makedirs('{}epoch'.format(opt.dirs))
    tensorboard_callback = TensorBoard(log_dir="{}".format(opt.dirs), histogram_freq=1)
    # ``period`` is deprecated in newer Keras (use save_freq) but kept for
    # compatibility with the TF version this project runs on.
    cp_callback = ModelCheckpoint(filepath=opt.dirs + 'epoch/{epoch:04d}.h5',
                                  period=1, save_weights_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='accuracy', verbose=1, factor=0.2,
                                  patience=5, min_lr=1e-10)

    if opt.load > 0:
        # TODO(review): the original passed an empty path here; fill in the
        # checkpoint path to resume from before relying on --load.
        model.load_weights('')

    history = model.fit(train_generator, epochs=opt.epochs,
                        callbacks=[tensorboard_callback, cp_callback, reduce_lr])

    # Re-evaluate every checkpoint on the test set and pick the best epoch.
    modelnum = history_csv(model, test_generator, history.history,
                           pathcsv='{}/{}-{}.csv'.format(opt.dirs, opt.models, timenow))
    # BUG FIX: guard against modelnum == 0 (no epoch evaluated), which would
    # try to load a nonexistent '0000.h5' checkpoint.
    if modelnum > 0:
        model.load_weights('{}epoch/{:04d}.h5'.format(opt.dirs, modelnum))
    score = model.evaluate(test_generator)
    model.save('{}{}-{:.6f}-{:.4f}.h5'.format(opt.dirs, opt.models,
                                              score[0], score[1] * 100))
    print('{}'.format(score))


def history_csv(model, test, history, pathcsv='plt.csv'):
    """Re-evaluate each per-epoch checkpoint on ``test`` and dump metrics.

    Writes one CSV row per epoch with the recorded training metrics and the
    freshly computed test metrics, plus a trailing row holding ``opt`` for
    provenance.  Returns the 1-based epoch number with the best test
    accuracy (0 if ``history`` is empty).
    """
    fields = ['id', 'loss', 'accuracy', 'Precision', 'Recall',
              'test_loss', 'test_accuracy', 'test_Precision', 'test_Recall']
    epochs = len(history['loss'])
    modelmax, modelnum = 0, 0
    with open(pathcsv, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        for i in range(epochs):
            # BUG FIX: report progress against the number of recorded epochs,
            # not opt.epochs (they differ if training stopped early).
            print('{}/{}'.format(i + 1, epochs))
            model.load_weights("{}epoch/{:04d}.h5".format(opt.dirs, i + 1))
            score = model.evaluate(test)
            writer.writerow({'id': '{}'.format(i + 1),
                             'loss': history['loss'][i],
                             'accuracy': history['accuracy'][i],
                             'Precision': history['Precision'][i],
                             'Recall': history['Recall'][i],
                             'test_loss': '{}'.format(score[0]),
                             'test_accuracy': '{}'.format(score[1]),
                             'test_Precision': '{}'.format(score[2]),
                             'test_Recall': '{}'.format(score[3])})
            if score[1] > modelmax:
                modelmax = score[1]
                modelnum = i + 1
        # Trailing provenance row with the full option namespace.
        writer.writerow({'id': opt})
        # BUG FIX: removed the redundant f.close(); the with-block closes it.
    return modelnum


def times(x=0):
    """Configure ``opt.data``/``opt.at``/``opt.af`` from the ``x``-th (1-based)
    combination of the hard-coded sweep grids below; no-op if out of range."""
    arr_data = ['Flower', 'Leaf', 'Fruit']
    arr_at = ['eca', 'se']
    arr_af = ['hswish', 'swish', 'relu']
    num = 0
    for data in arr_data:
        for at in arr_at:
            for af in arr_af:
                num += 1
                if num == x:
                    opt.data, opt.at, opt.af = data, at, af
                    print('{} {} {}'.format(opt.data, opt.at, opt.af))
                    # BUG FIX: return instead of break — break only left the
                    # innermost loop and the outer loops kept spinning.
                    return


if __name__ == '__main__':
    # Repeat the configured training run opt.num times; alternative sweep
    # drivers (model list, times()+subprocess) were removed as dead code.
    for i in range(opt.num):
        trainmodel()

# 2021-06-13 guangjinzheng tensorflow efficientnet
osxui_poco.py
# -*- coding: utf-8 -*- import threading from poco.drivers.std import StdPoco from poco.utils.device import VirtualDevice from poco.drivers.std import DEFAULT_ADDR, DEFAULT_PORT from poco.utils.simplerpc.utils import sync_wrapper class OSXPoco(StdPoco): def __init__(self, selector=None, addr=DEFAULT_ADDR, **options): if 'action_interval' not in options: options['action_interval'] = 0.5 if addr[0] == "localhost" or addr[0] == "127.0.0.1": from poco.drivers.osx.sdk.OSXUI import PocoSDKOSX sdk = PocoSDKOSX(addr) self.SDKProcess = threading.Thread(target=sdk.run) # 创建线程 self.SDKProcess.setDaemon(True) self.SDKProcess.start() dev = VirtualDevice(addr[0]) super(OSXPoco, self).__init__(addr[1], dev, False, **options) self.selector = selector self.connect_window(self.selector) self.set_foreground() @sync_wrapper def connect_window(self, selector): return self.agent.rpc.call("ConnectWindow", selector) @sync_wrapper def set_foreground(self): return self.agent.rpc.call("SetForeground")
basic_gpu_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import threading

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test


class GPUBinaryOpsTest(test.TestCase):
  """Checks elementwise binary ops give the same result on GPU as on CPU."""

  def _compareGPU(self, x, y, np_func, tf_func):
    """Evaluates tf_func(x, y) on GPU and on CPU and asserts they agree.

    The CPU result is taken as the reference; a small relative tolerance
    absorbs floating-point differences between the two kernels.
    """
    with self.cached_session(use_gpu=True) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = self.evaluate(out)

    with self.cached_session(use_gpu=False) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = self.evaluate(out)

    self.assertAllClose(tf_cpu, tf_gpu, rtol=1e-5)

  def testFloatBasic(self):
    """Same-shape float32 operands for the common binary ops."""
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    # y + 0.1 keeps the divisor away from exact zero.
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
    self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    self._compareGPU(x, y, np.power, math_ops.pow)

  def testFloatWithBCast(self):
    """float32 operands whose shapes broadcast: (3, 5) against (2, 3, 5)."""
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleBasic(self):
    """Same-shape float64 operands for the common binary ops."""
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleWithBCast(self):
    """float64 operands whose shapes broadcast: (3, 5) against (2, 3, 5)."""
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)


class MathBuiltinUnaryTest(test.TestCase):
  """Checks elementwise unary math ops against their NumPy counterparts."""

  def _compare(self, x, np_func, tf_func, use_gpu, atol=1e-6):
    """Evaluates tf_func(x) and asserts it matches np_func(x)."""
    np_out = np_func(x)
    with self.cached_session(use_gpu=use_gpu) as sess:
      inx = ops.convert_to_tensor(x)
      ofunc = tf_func(inx)
      tf_out = self.evaluate(ofunc)
    self.assertAllClose(np_out, tf_out, atol=atol, rtol=1e-4)

  def _inv(self, x):
    # NumPy reference for tf reciprocal-style ops.
    return 1.0 / x

  def _rsqrt(self, x):
    # NumPy reference for math_ops.rsqrt: 1 / sqrt(x).
    return self._inv(np.sqrt(x))

  def _testDtype(self, dtype, use_gpu):
    """Runs the full unary-op battery for one dtype.

    ``data`` spans [-0.75, 0.5]; ops whose domain excludes part of that
    range (e.g. log, sqrt of negatives) produce NaN on both sides, which
    assertAllClose treats as equal.  ``data_gt_1`` shifts into (1, 3) for
    acosh, whose domain is x > 1.
    """
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    data_gt_1 = data + 2  # for x > 1
    self._compare(data, np.abs, math_ops.abs, use_gpu)
    self._compare(data, np.arccos, math_ops.acos, use_gpu)
    self._compare(data, np.arcsin, math_ops.asin, use_gpu, atol=1e-4)
    self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
    self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
    self._compare(data, np.arctan, math_ops.atan, use_gpu)
    self._compare(data, np.ceil, math_ops.ceil, use_gpu)
    self._compare(data, np.cos, math_ops.cos, use_gpu)
    self._compare(data, np.cosh, math_ops.cosh, use_gpu)
    self._compare(data, np.exp, math_ops.exp, use_gpu)
    self._compare(data, np.floor, math_ops.floor, use_gpu)
    self._compare(data, np.log, math_ops.log, use_gpu)
    self._compare(data, np.log1p, math_ops.log1p, use_gpu)
    self._compare(data, np.negative, math_ops.negative, use_gpu)
    self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
    self._compare(data, np.sin, math_ops.sin, use_gpu)
    self._compare(data, np.sinh, math_ops.sinh, use_gpu)
    self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
    self._compare(data, np.square, math_ops.square, use_gpu)
    self._compare(data, np.tan, math_ops.tan, use_gpu)
    self._compare(data, np.tanh, math_ops.tanh, use_gpu)
    self._compare(data, np.arctanh, math_ops.atanh, use_gpu)

  def testTypes(self):
    # Only float32 is exercised on GPU here.
    for dtype in [np.float32]:
      self._testDtype(dtype, use_gpu=True)

  def testFloorDivide(self):
    """floor_divide expressed as truediv followed by floor, vs NumPy."""
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    np_out = np.floor_divide(x, y + 0.1)

    with self.session(use_gpu=True) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y + 0.1)
      ofunc = inx / iny
      out_func2 = math_ops.floor(ofunc)
      tf_out = self.evaluate(out_func2)

    self.assertAllClose(np_out, tf_out)


class BroadcastSimpleTest(test.TestCase):
  """Checks broadcast gradient args and gradients of broadcasting ops."""

  def _GetGradientArgs(self, xs, ys):
    """Returns the reduction indices computed by broadcast_gradient_args."""
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(broadcast_gradient_args(xs, ys))

  @test_util.run_deprecated_v1
  def testBroadcast(self):
    # Broadcasting [1] against [2, 3, 5]: the gradient w.r.t. the [1]
    # operand must be reduced over all three axes.
    r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
    self.assertAllEqual(r0, [])
    self.assertAllEqual(r1, [0, 1, 2])

  # Gradient-check tolerance per dtype.
  _GRAD_TOL = {dtypes.float32: 1e-3}

  def _compareGradientX(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    """Numeric-vs-symbolic gradient check with respect to the first operand."""
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        # Scale the output so the gradient is not trivially the identity.
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGradientY(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    """Numeric-vs-symbolic gradient check with respect to the second operand."""
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGpu(self, x, y, np_func, tf_func):
    """Forward-pass check of tf_func on GPU against the NumPy reference."""
    np_ans = np_func(x, y)
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = self.evaluate(out)
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)
    # TODO(zhifengc/ke): make gradient checker work on GPU.

  @test_util.run_deprecated_v1
  def testGradient(self):
    """Gradient and forward checks for truediv/floordiv on float32 data."""
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
    self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)


class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
  """Tests concurrent sessions executing on the same GPU."""

  def _run_session(self, session, results):
    """Runs a fixed matmul graph repeatedly, collecting outputs into a set.

    If sessions were interfering with one another, a run would produce a
    different value, the set would grow past one element, and the loop
    stops early; the caller asserts on the set's final size.
    """
    n_iterations = 500
    with session as s:
      data = variables.Variable(1.0)
      with test_util.force_gpu():
        random_seed.set_random_seed(1)
        matrix1 = variables.Variable(
            random_ops.truncated_normal([1024, 1]), name='matrix1')
        matrix2 = variables.Variable(
            random_ops.truncated_normal([1, 1024]), name='matrix2')
        x1 = math_ops.multiply(data, matrix1, name='x1')
        x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
        x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
        s.run(variables.global_variables_initializer())

        for _ in xrange(n_iterations):
          value = s.run(x4)
          results.add(value.flat[0])
          if len(results) != 1:
            break

  @test_util.run_v1_only('b/126596827 needs graph mode in multiple threads')
  def testConcurrentSessions(self):
    """Runs _run_session in several threads, each with its own graph/session,
    and asserts every thread computed the identical single value."""
    n_threads = 4
    threads = []
    results = []
    for _ in xrange(n_threads):
      session = self.session(graph=ops.Graph(), use_gpu=True)
      results.append(set())
      args = (session, results[-1])
      threads.append(threading.Thread(target=self._run_session, args=args))

    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    flat_results = set([x for x in itertools.chain(*results)])
    self.assertEqual(1,
                     len(flat_results),
                     'Expected single value, got %r' % flat_results)


if __name__ == '__main__':
  test.main()
bot.py
# coding=utf-8 """ bot.py - Sopel IRC Bot Copyright 2008, Sean B. Palmer, inamidst.com Copyright 2012, Edward Powell, http://embolalia.net Copyright © 2012, Elad Alfassa <elad@fedoraproject.org> Licensed under the Eiffel Forum License 2. http://sopel.dftba.net/ """ import time import imp import os import re import socket import threading from datetime import datetime from sopel import tools import irc from db import SopelDB from tools import (stderr, Nick, PriorityQueue, released, get_command_regexp) import module class Sopel(irc.Bot): NOLIMIT = module.NOLIMIT def __init__(self, config): irc.Bot.__init__(self, config.core) self.config = config """The ``Config`` for the current Sopel instance.""" self.doc = {} """ A dictionary of command names to their docstring and example, if declared. The first item in a callable's commands list is used as the key in version *3.2* onward. Prior to *3.2*, the name of the function as declared in the source code was used. """ self.stats = {} """ A dictionary which maps a tuple of a function name and where it was used to the nuber of times it was used there. """ self.times = {} """ A dictionary mapping lower-case'd nicks to dictionaries which map funtion names to the time which they were last used by that nick. """ self.acivity = {} self.server_capabilities = set() """A set containing the IRCv3 capabilities that the server supports. 
For servers that do not support IRCv3, this will be an empty set.""" self.enabled_capabilities = set() """A set containing the IRCv3 capabilities that the bot has enabled.""" self._cap_reqs = dict() """A dictionary of capability requests Maps the capability name to a tuple of the prefix ('-', '=', or ''), the name of the requesting module, and the function to call if the request is rejected.""" self.privileges = dict() """A dictionary of channels to their users and privilege levels The value associated with each channel is a dictionary of Nicks to a bitwise integer value, determined by combining the appropriate constants from `module`.""" self.db = SopelDB(config) if self.db.check_table('locales', ['name'], 'name'): self.settings = self.db.locales self.db.preferences = self.db.locales elif self.db.check_table('preferences', ['name'], 'name'): self.settings = self.db.preferences elif self.db.type is not None: self.db.add_table('preferences', ['name'], 'name') self.settings = self.db.preferences self.memory = tools.SopelMemory() """ A thread-safe dict for storage of runtime data to be shared between modules. See `SopelMemory <#tools.Sopel.SopelMemory>`_ """ self.scheduler = Sopel.JobScheduler(self) self.scheduler.start() #Set up block lists #Default to empty if not self.config.core.nick_blocks: self.config.core.nick_blocks = [] if not self.config.core.nick_blocks: self.config.core.host_blocks = [] #Add nicks blocked under old scheme, if present if self.config.core.other_bots: nicks = self.config.core.get_list('nick_blocks') bots = self.config.core.get_list('other_bots') nicks.extend(bots) self.config.core.nick_blocks = nicks self.config.core.other_bots = False self.config.save() self.setup() class JobScheduler(threading.Thread): """Calls jobs assigned to it in steady intervals. JobScheduler is a thread that keeps track of Jobs and calls them every X seconds, where X is a property of the Job. 
It maintains jobs in a priority queue, where the next job to be called is always the first item. Thread safety is maintained with a mutex that is released during long operations, so methods add_job and clear_jobs can be safely called from the main thread. """ min_reaction_time = 30.0 # seconds """How often should scheduler checks for changes in the job list.""" def __init__(self, bot): """Requires bot as argument for logging.""" threading.Thread.__init__(self) self.bot = bot self._jobs = PriorityQueue() # While PriorityQueue it self is thread safe, this mutex is needed # to stop old jobs being put into new queue after clearing the # queue. self._mutex = threading.Lock() # self.cleared is used for more fine grained locking. self._cleared = False def add_job(self, job): """Add a Job to the current job queue.""" self._jobs.put(job) def clear_jobs(self): """Clear current Job queue and start fresh.""" if self._jobs.empty(): # Guards against getting stuck waiting for self._mutex when # thread is waiting for self._jobs to not be empty. return with self._mutex: self._cleared = True self._jobs = PriorityQueue() def run(self): """Run forever.""" while True: try: self._do_next_job() except Exception: # Modules exceptions are caught earlier, so this is a bit # more serious. Options are to either stop the main thread # or continue this thread and hope that it won't happen # again. self.bot.error() # Sleep a bit to guard against busy-looping and filling # the log with useless error messages. time.sleep(10.0) # seconds def _do_next_job(self): """Wait until there is a job and do it.""" with self._mutex: # Wait until the next job should be executed. # This has to be a loop, because signals stop time.sleep(). 
while True: job = self._jobs.peek() difference = job.next_time - time.time() duration = min(difference, self.min_reaction_time) if duration <= 0: break with released(self._mutex): time.sleep(duration) self._cleared = False job = self._jobs.get() with released(self._mutex): if job.func.thread: t = threading.Thread( target=self._call, args=(job.func,) ) t.start() else: self._call(job.func) job.next() # If jobs were cleared during the call, don't put an old job # into the new job queue. if not self._cleared: self._jobs.put(job) def _call(self, func): """Wrapper for collecting errors from modules.""" # Sopel.bot.call is way too specialized to be used instead. try: func(self.bot) except Exception: self.bot.error() class Job(object): """ Job is a simple structure that hold information about when a function should be called next. They can be put in a priority queue, in which case the Job that should be executed next is returned. Calling the method next modifies the Job object for the next time it should be executed. Current time is used to decide when the job should be executed next so it should only be called right after the function was called. """ max_catchup = 5 """ This governs how much the scheduling of jobs is allowed to get behind before they are simply thrown out to avoid calling the same function too many times at once. """ def __init__(self, interval, func): """Initialize Job. Args: interval: number of seconds between calls to func func: function to be called """ self.next_time = time.time() + interval self.interval = interval self.func = func def next(self): """Update self.next_time with the assumption func was just called. Returns: A modified job object. """ last_time = self.next_time current_time = time.time() delta = last_time + self.interval - current_time if last_time > current_time + self.interval: # Clock appears to have moved backwards. Reset # the timer to avoid waiting for the clock to # catch up to whatever time it was previously. 
self.next_time = current_time + self.interval elif delta < 0 and abs(delta) > self.interval * self.max_catchup: # Execution of jobs is too far behind. Give up on # trying to catch up and reset the time, so that # will only be repeated a maximum of # self.max_catchup times. self.next_time = current_time - \ self.interval * self.max_catchup else: self.next_time = last_time + self.interval return self def __cmp__(self, other): """Compare Job objects according to attribute next_time.""" return self.next_time - other.next_time def __str__(self): """Return a string representation of the Job object. Example result: <Job(2013-06-14 11:01:36.884000, 20s, <function upper at 0x02386BF0>)> """ iso_time = str(datetime.fromtimestamp(self.next_time)) return "<Job(%s, %ss, %s)>" % \ (iso_time, self.interval, self.func) def __iter__(self): """This is an iterator. Never stops though.""" return self def setup(self): stderr(u"\nXeniBot será ejecutado en la red y canales indicados. Leyendo módulos...\n\n") self.callables = set() self.shutdown_methods = set() filenames = self.config.enumerate_modules() # Coretasks is special. No custom user coretasks. 
this_dir = os.path.dirname(os.path.abspath(__file__)) filenames['coretasks'] = os.path.join(this_dir, 'coretasks.py') modules = [] error_count = 0 for name, filename in filenames.iteritems(): try: module = imp.load_source(name, filename) except Exception, e: error_count = error_count + 1 filename, lineno = tools.get_raising_file_and_line() rel_path = os.path.relpath(filename, os.path.dirname(__file__)) raising_stmt = "%s:%d" % (rel_path, lineno) stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt)) else: try: if hasattr(module, 'setup'): module.setup(self) self.register(vars(module)) modules.append(name) except Exception, e: error_count = error_count + 1 filename, lineno = tools.get_raising_file_and_line() rel_path = os.path.relpath( filename, os.path.dirname(__file__) ) raising_stmt = "%s:%d" % (rel_path, lineno) stderr(u"Error in the configuration protocol %s: %s (%s)" % (name, e, raising_stmt)) if modules: stderr(u'\n\n%d módulos existentes,' % (len(modules) - 1)) stderr(u'%d módulos no cargados.\n\n' % error_count) else: stderr(u"Warning: I couldn't find any modules!") self.bind_commands() @staticmethod def is_callable(obj): """Return true if object is a sopel callable. Object must be both be callable and have hashable. Furthermore, it must have either "commands", "rule" or "interval" as attributes to mark it as a sopel callable. """ if not callable(obj): # Check is to help distinguish between sopel callables and objects # which just happen to have parameter commands or rule. return False if (hasattr(obj, 'commands') or hasattr(obj, 'rule') or hasattr(obj, 'interval')): return True return False @staticmethod def is_shutdown(obj): """Return true if object is a sopel shutdown method. Object must be both be callable and named shutdown. 
""" if (callable(obj) and hasattr(obj, "name") and obj.__name__ == 'shutdown'): return True return False def register(self, variables): """ With the ``__dict__`` attribute from a Sopel module, update or add the trigger commands and rules, to allow the function to be triggered, and shutdown methods, to allow the modules to be notified when sopel is quitting. """ for obj in variables.itervalues(): if self.is_callable(obj): self.callables.add(obj) if self.is_shutdown(obj): self.shutdown_methods.add(obj) def unregister(self, variables): """Unregister all sopel callables in variables, and their bindings. When unloading a module, this ensures that the unloaded modules will not get called and that the objects can be garbage collected. Objects that have not been registered are ignored. Args: variables -- A list of callable objects from a sopel module. """ def remove_func(func, commands): """Remove all traces of func from commands.""" for func_list in commands.itervalues(): if func in func_list: func_list.remove(func) hostmask = "%s!%s@%s" % (self.nick, self.user, socket.gethostname()) sopel = self.SopelWrapper(self, irc.Origin(self, hostmask, [], {})) for obj in variables.itervalues(): if obj in self.callables: self.callables.remove(obj) for commands in self.commands.itervalues(): remove_func(obj, commands) if obj in self.shutdown_methods: try: obj(sopel) except Exception as e: stderr( "Error calling shutdown method for module %s:%s" % (obj.__module__, e) ) self.shutdown_methods.remove(obj) def bind_commands(self): self.commands = {'high': {}, 'medium': {}, 'low': {}} self.scheduler.clear_jobs() def bind(self, priority, regexp, func): # Function name is no longer used for anything, as far as I know, # but we're going to keep it around anyway. if not hasattr(func, 'name'): func.name = func.__name__ # At least for now, only account for the first command listed. 
        # --- tail of the nested ``bind`` registration helper ---------------
        # Record the callable's docstring and usage example so the built-in
        # help system can display them under the first command name.
        if func.__doc__ and hasattr(func, 'commands') and func.commands[0]:
            if hasattr(func, 'example'):
                if isinstance(func.example, basestring):
                    # Support old modules that add the attribute directly.
                    example = func.example
                else:
                    # The new format is a list of dicts.
                    example = func.example[0]["example"]
                example = example.replace('$nickname', str(self.nick))
            else:
                example = None
            self.doc[func.commands[0]] = (func.__doc__, example)
        self.commands[priority].setdefault(regexp, []).append(func)

    def sub(pattern, self=self):
        # Expand the ``$nickname``/``$nick`` placeholders used in module
        # rule patterns. These replacements have significant order:
        # ``$nickname`` must be substituted before the shorter ``$nick``.
        pattern = pattern.replace(
            '$nickname',
            r'%s' % re.escape(self.nick)
        )
        return pattern.replace('$nick', r'%s[,:] +' % re.escape(self.nick))

    for func in self.callables:
        # Fill in defaults for decorator-set attributes a module callable
        # may omit.
        if not hasattr(func, 'unblockable'):
            func.unblockable = False
        if not hasattr(func, 'priority'):
            func.priority = 'medium'
        if not hasattr(func, 'thread'):
            func.thread = True
        if not hasattr(func, 'event'):
            func.event = 'PRIVMSG'
        else:
            func.event = func.event.upper()
        if not hasattr(func, 'rate'):
            # NOTE(review): both branches assign 0, so this default is
            # effectively unconditional; the split looks like a leftover.
            if hasattr(func, 'commands'):
                func.rate = 0
            else:
                func.rate = 0
        if hasattr(func, 'rule'):
            rules = func.rule
            if isinstance(rules, basestring):
                rules = [func.rule]

            if isinstance(rules, list):
                for rule in rules:
                    pattern = sub(rule)
                    flags = re.IGNORECASE
                    # Multi-line rules are compiled as verbose patterns.
                    if rule.find("\n") != -1:
                        flags |= re.VERBOSE
                    regexp = re.compile(pattern, flags)
                    bind(self, func.priority, regexp, func)

            elif isinstance(func.rule, tuple):
                # 1) e.g. ('$nick', '(.*)')
                if len(func.rule) == 2 and isinstance(func.rule[0], str):
                    prefix, pattern = func.rule
                    prefix = sub(prefix)
                    regexp = re.compile(prefix + pattern, re.I)
                    bind(self, func.priority, regexp, func)

                # 2) e.g. (['p', 'q'], '(.*)')
                elif len(func.rule) == 2 and \
                        isinstance(func.rule[0], list):
                    prefix = self.config.core.prefix
                    commands, pattern = func.rule
                    for command in commands:
                        command = r'(%s)\b(?: +(?:%s))?' % (
                            command, pattern
                        )
                        regexp = re.compile(prefix + command, re.I)
                        bind(self, func.priority, regexp, func)

                # 3) e.g. ('$nick', ['p', 'q'], '(.*)')
                elif len(func.rule) == 3:
                    prefix, commands, pattern = func.rule
                    prefix = sub(prefix)
                    for command in commands:
                        command = r'(%s) +' % command
                        regexp = re.compile(
                            prefix + command + pattern, re.I
                        )
                        bind(self, func.priority, regexp, func)

        if hasattr(func, 'commands'):
            for command in func.commands:
                prefix = self.config.core.prefix
                regexp = get_command_regexp(prefix, command)
                bind(self, func.priority, regexp, func)

        if hasattr(func, 'interval'):
            # Interval-triggered callables are scheduled as jobs rather
            # than bound to a message pattern.
            for interval in func.interval:
                job = Sopel.Job(interval, func)
                self.scheduler.add_job(job)

    class SopelWrapper(object):
        """Thin per-message proxy around the bot.

        Binds the triggering message's origin so module code can reply
        without tracking the destination itself; every other attribute
        access is delegated to the wrapped bot.
        """

        def __init__(self, sopel, origin):
            self.bot = sopel
            self.origin = origin

        def say(self, string, max_messages=1):
            # Send to the channel/nick the triggering message came from.
            self.bot.msg(self.origin.sender, string, max_messages)

        def reply(self, string):
            # Prefix the reply with the triggering nick.
            if isinstance(string, str):
                string = string.decode('utf8')
            self.bot.msg(
                self.origin.sender,
                u'%s: %s' % (self.origin.nick, string)
            )

        def action(self, string, recipient=None):
            if recipient is None:
                recipient = self.origin.sender
            # CTCP ACTION ("/me") framing.
            self.bot.msg(recipient, '\001ACTION %s\001' % string)

        def __getattr__(self, attr):
            return getattr(self.bot, attr)

    class Trigger(unicode):
        """The matched line itself, enriched with IRC metadata attributes."""

        def __new__(cls, text, origin, bytes, match, event, args, self):
            s = unicode.__new__(cls, text)
            s.sender = origin.sender
            """
            The channel (or nick, in a private message) from which the
            message was sent.
            """
            s.hostmask = origin.hostmask
            """
            Hostmask of the person who sent the message in the form
            <nick>!<user>@<host>
            """
            s.user = origin.user
            """Local username of the person who sent the message"""
            s.nick = origin.nick
            """The ``Nick`` of the person who sent the message."""
            s.event = event
            """
            The IRC event (e.g. ``PRIVMSG`` or ``MODE``) which triggered the
            message."""
            s.bytes = bytes
            """
            The text which triggered the message. Equivalent to
            ``Trigger.group(0)``.
            """
            s.match = match
            """
            The regular expression ``MatchObject_`` for the triggering line.

            .. _MatchObject: http://docs.python.org/library/re.html#match-objects
            """
            s.group = match.group
            """The ``group`` function of the ``match`` attribute.

            See Python ``re_`` documentation for details."""
            s.groups = match.groups
            """The ``groups`` function of the ``match`` attribute.

            See Python ``re_`` documentation for details."""
            s.args = args
            """
            A tuple containing each of the arguments to an event. These are
            the strings passed between the event name and the colon. For
            example, setting ``mode -m`` on the channel ``#example``, args
            would be ``('#example', '-m')``
            """
            s.tags = origin.tags
            """A map of the IRCv3 message tags on the message.

            If the message had no tags, or the server does not support
            IRCv3 message tags, this will be an empty dict."""

            # Nick-based admin check against the configured admin list.
            if len(self.config.core.get_list('admins')) > 0:
                s.admin = (origin.nick in
                           [Nick(n) for n in
                            self.config.core.get_list('admins')])
            else:
                s.admin = False
            """
            True if the nick which triggered the command is in Sopel's admin
            list as defined in the config file.
            """
            # Support specifying admins by hostnames
            if not s.admin and len(self.config.core.get_list('admins')) > 0:
                for each_admin in self.config.core.get_list('admins'):
                    re_admin = re.compile(each_admin)
                    if re_admin.findall(origin.host):
                        s.admin = True
                    elif '@' in each_admin:
                        temp = each_admin.split('@')
                        re_host = re.compile(temp[1])
                        if re_host.findall(origin.host):
                            s.admin = True
            if not self.config.core.owner:
                s.owner = False
            elif '@' in self.config.core.owner:
                s.owner = origin.nick + '@' + \
                    origin.host == self.config.core.owner
            else:
                s.owner = (origin.nick == Nick(self.config.core.owner))

            # Bot owner inherits all the admin rights, therefore is
            # considered admin
            s.admin = s.admin or s.owner

            s.host = origin.host
            if s.sender is not s.nick:  # no ops in PM
                s.ops = self.ops.get(s.sender, [])
                """
                List of channel operators in the channel the message was
                received in
                """
                s.halfplus = self.halfplus.get(s.sender, [])
                """
                List of channel half-operators in the channel the message
                was received in
                """
                s.isop = (s.nick in s.ops or
                          s.nick in s.halfplus)
                """True if the user is half-op or an op"""
                s.voices = self.voices.get(s.sender, [])
                """
                List of voiced users in the channel the message was
                received in
                """
                s.isvoice = (s.nick in s.ops or
                             s.nick in s.halfplus or
                             s.nick in s.voices)
                """True if the user is voiced, has op, or has half-op"""
            else:
                s.isop = False
                s.isvoice = False
                s.ops = []
                s.halfplus = []
                s.voices = []
            return s

    def call(self, func, origin, sopel, trigger):
        """Invoke ``func`` for ``trigger``, enforcing its per-nick rate limit."""
        nick = trigger.nick
        if nick not in self.times:
            self.times[nick] = dict()

        # Admins and unblockable callables bypass rate limiting.
        if not trigger.admin and \
                not func.unblockable and \
                func in self.times[nick]:
            timediff = time.time() - self.times[nick][func]
            if timediff < func.rate:
                self.times[nick][func] = time.time()
                self.debug(
                    __file__,
                    "%s prevented from using %s in %s: %d < %d" % (
                        trigger.nick, func.__name__, trigger.sender,
                        timediff, func.rate
                    ),
                    "verbose"
                )
                return

        try:
            exit_code = func(sopel, trigger)
        except Exception:
            exit_code = None
            self.error(origin, trigger)

        # A callable may return module.NOLIMIT to avoid this invocation
        # counting against its rate limit.
        if exit_code != module.NOLIMIT:
            self.times[nick][func] = time.time()

    def limit(self, origin, func):
        """Return True when the ``[limit]`` config bars ``func`` in this channel."""
        if origin.sender and origin.sender.startswith('#'):
            if self.config.has_section('limit'):
                limits = self.config.limit.get(origin.sender)
                if limits and (func.__module__ not in limits):
                    return True
        return False

    def dispatch(self, origin, text, args):
        """Match ``text`` against all registered rules and run the callables."""
        event, args = args[0], args[1:]

        wrapper = self.SopelWrapper(self, origin)

        # Only evaluate the block lists when blocking is configured at all.
        if self.config.core.nick_blocks or self.config.core.host_blocks:
            nick_blocked = self._nick_blocked(origin.nick)
            host_blocked = self._host_blocked(origin.host)
        else:
            nick_blocked = host_blocked = None

        list_of_blocked_functions = []
        for priority in ('high', 'medium', 'low'):
            items = self.commands[priority].items()

            for regexp, funcs in items:
                match = regexp.match(text)
                if not match:
                    continue
                trigger = self.Trigger(
                    text, origin, text, match, event, args, self
                )

                for func in funcs:
                    # Blocked users are skipped unless the callable is
                    # unblockable or the user is an admin.
                    if (not trigger.admin and
                            not func.unblockable and
                            (nick_blocked or host_blocked)):
                        function_name = "%s.%s" % (
                            func.__module__, func.__name__
                        )
                        list_of_blocked_functions.append(function_name)
                        continue

                    if event != func.event:
                        continue
                    if self.limit(origin, func):
                        continue
                    # Run threaded unless the module opted out.
                    if func.thread:
                        targs = (func, origin, wrapper, trigger)
                        t = threading.Thread(target=self.call, args=targs)
                        t.start()
                    else:
                        self.call(func, origin, wrapper, trigger)

        if list_of_blocked_functions:
            if nick_blocked and host_blocked:
                block_type = 'both'
            elif nick_blocked:
                block_type = 'nick'
            else:
                block_type = 'host'
            self.debug(
                __file__,
                "[%s]%s prevented from using %s."
                % (
                    block_type,
                    origin.nick,
                    ', '.join(list_of_blocked_functions)
                ),
                "verbose"
            )

    def _host_blocked(self, host):
        # True when ``host`` matches a configured host block, either as an
        # anchored regex or as a literal string.
        bad_masks = self.config.core.get_list('host_blocks')
        for bad_mask in bad_masks:
            bad_mask = bad_mask.strip()
            if not bad_mask:
                continue
            if (re.match(bad_mask + '$', host, re.IGNORECASE) or
                    bad_mask == host):
                return True
        return False

    def _nick_blocked(self, nick):
        # True when ``nick`` matches a configured nick block, either as an
        # anchored regex or via case-insensitive ``Nick`` equality.
        bad_nicks = self.config.core.get_list('nick_blocks')
        for bad_nick in bad_nicks:
            bad_nick = bad_nick.strip()
            if not bad_nick:
                continue
            if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
                    Nick(bad_nick) == nick):
                return True
        return False

    def debug(self, tag, text, level):
        """Sends a debug message to Sopel's configured ``debug_target``.

        Args:
            tag - What the msg will be tagged as. It is recommended to pass
                __file__ as the tag. If the file exists, a relative path is
                used as the tag. Otherwise the tag is used as it is.
            text - Body of the message.
            level - Either verbose, warning or always. The configuration
                option config.verbose determines which levels are ignored.
        Returns:
            True if message was sent.
        """
        # Fall back to defaults when the config leaves these unset.
        if not self.config.core.verbose:
            self.config.core.verbose = 'warning'
        if not self.config.core.debug_target:
            self.config.core.debug_target = 'stdio'
        debug_target = self.config.core.debug_target
        verbosity = self.config.core.verbose

        if os.path.exists(tag):
            tag = os.path.relpath(tag, os.path.dirname(__file__))

        debug_msg = "[%s] %s" % (tag, text)
        # Each message level is emitted only at the verbosity settings
        # listed for it here.
        output_on = {
            'verbose': ['verbose'],
            'warning': ['verbose', 'warning'],
            'always': ['verbose', 'warning', 'always'],
        }
        if level in output_on and verbosity in output_on[level]:
            if debug_target == 'stdio':
                print debug_msg
            else:
                self.msg(debug_target, debug_msg)
            return True
        else:
            return False

    def _shutdown(self):
        # Run every registered module shutdown hook, logging (never
        # raising) failures so one module cannot abort the others.
        stderr(
            'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
        )
        hostmask = "%s!%s@%s" % (self.nick, self.user, socket.gethostname())
        sopel = self.SopelWrapper(self, irc.Origin(self, hostmask, [], {}))
        for shutdown_method in self.shutdown_methods:
            try:
                stderr(
                    "calling %s.%s" % (
                        shutdown_method.__module__,
                        shutdown_method.__name__,
                    )
                )
                shutdown_method(sopel)
            except Exception as e:
                stderr(
                    "Error calling shutdown method for module %s:%s" % (
                        shutdown_method.__module__, e
                    )
                )

    def cap_req(self, module_name, capability, failure_callback):
        """Tell Sopel to request a capability when it starts.

        By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Simmilarly, by prefixing the capability
        with `=`, it will be ensured that the capability is enabled.
        Requiring and disabling is "first come, first served"; if one module
        requires a capability, and another prohibits it, this function will
        raise an exception in whichever module loads second. An exception
        will also be raised if the module is being loaded after the bot has
        already started, and the request would change the set of enabled
        capabilities.

        If the capability is not prefixed, and no other module prohibits it,
        it will be requested. Otherwise, it will not be requested. Since
        capability requests that are not mandatory may be rejected by the
        server, as well as by other modules, a module which makes such a
        request should account for that possibility.

        The actual capability request to the server is handled after the
        completion of this function. In the event that the server denies a
        request, the `failure_callback` function will be called, if
        provided. The arguments will be a `Sopel` object, and the capability
        which was rejected. This can be used to disable callables which rely
        on the capability."""
        # TODO raise better exceptions
        cap = capability[1:]
        prefix = capability[0]

        if prefix == '-':
            # Prohibition request: conflicts with any prior non-'-' entry.
            if self.connection_registered and cap in self.enabled_capabilities:
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            entry = self._cap_reqs.get(cap, [])
            if any((ent[0] != '-' for ent in entry)):
                raise Exception('Capability conflict')
            entry.append((prefix, module_name, failure_callback))
            self._cap_reqs[cap] = entry
        else:
            if prefix != '=':
                # No recognised prefix: treat the whole string as the
                # capability name and record an optional request.
                cap = capability
                prefix = ''
            if self.connection_registered and (cap not in
                                               self.enabled_capabilities):
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            entry = self._cap_reqs.get(cap, [])
            if any((ent[0] == '-' for ent in entry)):
                raise Exception('Capability conflict')
            entry.append((prefix, module_name, failure_callback))
            self._cap_reqs[cap] = entry


if __name__ == '__main__':
    print __doc__
# ===== file boundary: all_bots.py =====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: josem

Telegram bot that returns the server's IP address.
Adapted from Alberto's bots available in umucv.
"""

# Telegram library
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# OpenCV
import cv2 as cv
# Byte streams (for sending images)
from io import BytesIO
# To build an image from raw bytes
from PIL import Image
# To load an image given a path
import skimage.io as io
# Camera stream
from umucv.stream import Camera
# Threads
import threading
# Running shell commands
import subprocess
# OS helpers
import os

# Updater built with the bot token (issued by BotFather)
updater = Updater('TOKEN')

# My Telegram user id (issued by IDBot)
my_id = 0

# Whether the webcam is enabled
cam_enabled = False

# Camera
if cam_enabled:
    cam = Camera(dev='0', size=(640, 480))

# Current working directory (used by the /exec command)
dir_path = subprocess.check_output("pwd").decode('utf-8')[:-1]


# /start command handler.
# Receives the bot and the new update.
def start(bot, update):
    # Reply to the received message
    update.message.reply_text("Estoy vivo!")


# /hello command handler: greets the user in the chat.
# Receives the bot and the new update.
def hello(bot, update):
    # Reply to the received message
    update.message.reply_text("Hola {}".format(update.message.from_user.first_name))


# Closes the camera and the updater thread.
def shutdown():
    updater.stop()
    updater.is_idle = False
    if cam_enabled:
        cam.stop()


# /stop command handler: triggers shutdown (owner only).
# Receives the bot and the new update.
def stop(bot, update):
    # Chat id
    cid = update.message.chat_id
    if cid == my_id:
        # Reply to the received message
        update.message.reply_text("Bye!")
        # Run shutdown from a separate thread
        threading.Thread(target=shutdown).start()
    else:
        update.message.reply_text("Bot privado!")


# Runs the shell command that obtains the IP.
def ip():
    # Decode as UTF-8 and strip the trailing space + newline
    return subprocess.check_output(["hostname", "-I"]).decode('utf-8')[:-2]


# /ip command handler: sends the server IP (owner only).
# Receives the bot and the new update.
def get_ip(bot, update):
    # Chat id
    cid = update.message.chat_id
    if cid == my_id:
        # Send the message containing the IP
        update.message.reply_text("Mi IP es {}".format(ip()))
    else:
        update.message.reply_text("Bot privado!")


# Sends an image to a chat.
# Receives the bot, the chat id and the frame.
def send_image(bot, cid, frame):
    # Convert to RGB
    frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    # Build the image from the frame array
    image = Image.fromarray(frame, mode='RGB')
    # Create the byte stream
    byte_io = BytesIO()
    # Save the image into the stream in PNG format
    image.save(byte_io, 'PNG')
    # Rewind the stream to position 0
    byte_io.seek(0)
    # The bot sends the photo to the chat
    bot.sendPhoto(chat_id=cid, photo=byte_io)


# /image command handler: sends a webcam capture (owner only).
# Receives the bot and the new update.
def get_image(bot, update):
    # Chat id
    cid = update.message.chat_id
    if cid == my_id:
        if cam_enabled:
            # Camera capture
            img = cam.frame
            # Send the image
            send_image(bot, cid, img)
        else:
            update.message.reply_text("No hay webcam")
    else:
        update.message.reply_text("Bot privado!")


# /echo command handler: echoes back its arguments.
# Receives the bot, the new update and the arguments.
def echo(bot, update, args):
    # Join the elements of args, separated by spaces
    update.message.reply_text(" ".join(args))


# /exec command handler: runs a command in the terminal (owner only).
# Receives the bot, the new update and the arguments.
def execute(bot, update, args):
    global dir_path
    # Chat id
    cid = update.message.chat_id
    if cid == my_id:
        # Prompt = whoami + hostname + pwd
        user = subprocess.check_output("whoami").decode('utf-8')[:-1]
        host = subprocess.check_output("hostname").decode('utf-8')[:-1]
        prompt = user + "@" + host + ":" + dir_path + " $ " + " ".join(args) + "\n"
        # Update the current directory if the command is cd
        if args[0] == "cd":
            # Path defaults to the user's home
            if len(args) < 2:
                dir_path = "/home/"+user
                out = ""
            # If a path is given, it must exist
            elif os.path.isdir(args[1]):
                dir_path = args[1]
                out = ""
            # Otherwise, error
            else:
                out = "No existe el directorio"
        # NOTE(review): indentation reconstructed — the command execution
        # is taken as the else-branch of the cd handling; confirm against
        # the original file.
        else:
            # Run the command (arguments) and decode as UTF-8
            command = " ".join(args)
            try:
                # With shell=True a plain string is passed, which allows
                # running commands with pipes
                out = subprocess.check_output(command, shell=True, cwd=dir_path).decode('utf-8')[:-1]
            except subprocess.CalledProcessError:
                out = "Error ejecutando el comando (exit status diferente de 0)"
        # Send the message with the output (trailing newline stripped)
        update.message.reply_text(prompt+out)
    else:
        update.message.reply_text("Bot privado!")


# Text handler: returns the user's message reversed.
# Receives the bot and the new update.
def process_text(bot, update):
    update.message.reply_text(update.message.text[::-1])


# Photo handler: returns the user's image in grayscale.
# Receives the bot and the new update.
def process_image(bot, update):
    # Id of the last (highest-resolution) photo
    file_id = update.message.photo[-1].file_id
    # Path for the file
    path = bot.get_file(file_id)['file_path']
    # Load the image given the path
    img = io.imread(path)
    # Reply with the image dimensions
    update.message.reply_text("{}x{}".format(img.shape[1], img.shape[0]))
    # Grayscale image (converted back to 3 channels for sending)
    r = cv.cvtColor(cv.cvtColor(img, cv.COLOR_RGB2GRAY), cv.COLOR_GRAY2RGB)
    # Send the image
    send_image(bot, update.message.chat_id, r)


# Main
def main():
    # Handlers
    updater.dispatcher.add_handler(CommandHandler('start', start))
    updater.dispatcher.add_handler(CommandHandler('hello', hello))
    updater.dispatcher.add_handler(CommandHandler('stop', stop))
    updater.dispatcher.add_handler(CommandHandler('ip', get_ip))
    updater.dispatcher.add_handler(CommandHandler('image', get_image))
    updater.dispatcher.add_handler(CommandHandler('echo', echo, pass_args=True))
    updater.dispatcher.add_handler(CommandHandler('exec', execute, pass_args=True))
    updater.dispatcher.add_handler(MessageHandler(Filters.text, process_text))
    updater.dispatcher.add_handler(MessageHandler(Filters.photo, process_image))

    # Start the bot
    updater.start_polling()

    # Initial message
    bot = updater.bot
    bot.sendMessage(chat_id=my_id, text="Mi IP es {}".format(ip()))

    # Block execution until the bot is stopped
    updater.idle()


# Entry point
if __name__ == '__main__':
    main()
# ===== file boundary: test_state.py =====
"""
Functional tests for the Salt ``state`` execution module
(``state.sls``, ``state.show_*``, ``state.request`` and friends).
"""
import logging
import os
import textwrap
import threading
import time

import pytest

import salt.loader
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils

log = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.windows_whitelisted,
]


def test_show_highstate(state, state_testfile_dest_path):
    """
    state.show_highstate
    """
    high = state.show_highstate()
    assert isinstance(high, dict)
    assert str(state_testfile_dest_path) in high
    assert high[str(state_testfile_dest_path)]["__env__"] == "base"


def test_show_lowstate(state):
    """
    state.show_lowstate
    """
    low = state.show_lowstate()
    assert isinstance(low, list)
    for entry in low:
        assert isinstance(entry, dict)


def test_show_states(state):
    """
    state.show_states
    """
    states = state.show_states()
    assert isinstance(states, list)
    for entry in states:
        assert isinstance(entry, str)
    assert states == ["core"]


def test_show_states_missing_sls(state, state_tree):
    """
    Test state.show_states with a sls file
    defined in a top file is missing
    """
    top_sls_contents = """
    base:
      '*':
        - core
        - does-not-exist
    """
    with pytest.helpers.temp_file("top.sls", top_sls_contents, state_tree):
        states = state.show_states()
        assert isinstance(states, list)
        assert states == ["No matching sls found for 'does-not-exist' in env 'base'"]


def test_catch_recurse(state, state_tree):
    """
    state.show_sls used to catch a recursive ref
    """
    sls_contents = """
    mysql:
      service:
        - running
        - require:
          - file: /etc/mysql/my.cnf

    /etc/mysql/my.cnf:
      file:
        - managed
        - source: salt://master.cnf
        - require:
          - service: mysql
    """
    with pytest.helpers.temp_file("recurse-fail.sls", sls_contents, state_tree):
        ret = state.sls("recurse-fail")
        assert ret.failed
        assert (
            'A recursive requisite was found, SLS "recurse-fail" ID "/etc/mysql/my.cnf" ID "mysql"'
            in ret.errors
        )


# Two SLS structures that contain requisites but are NOT recursive; used by
# test_no_recurse below.
RECURSE_SLS_ONE = """
snmpd:
  pkg:
    - installed
  service:
    - running
    - require:
      - pkg: snmpd
    - watch:
      - file: /etc/snmp/snmpd.conf

/etc/snmp/snmpd.conf:
  file:
    - managed
    - source: salt://snmpd/snmpd.conf.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0600"
    - require:
      - pkg: snmpd
"""
RECURSE_SLS_TWO = """
nagios-nrpe-server:
  pkg:
    - installed
  service:
    - running
    - watch:
      - file: /etc/nagios/nrpe.cfg

/etc/nagios/nrpe.cfg:
  file:
    - managed
    - source: salt://baseserver/nrpe.cfg
    - require:
      - pkg: nagios-nrpe-server
"""


@pytest.mark.parametrize(
    "sls_contents, expected_in_output",
    [(RECURSE_SLS_ONE, "snmpd"), (RECURSE_SLS_TWO, "/etc/nagios/nrpe.cfg")],
    ids=("recurse-scenario-1", "recurse-scenario-2"),
)
def test_no_recurse(state, state_tree, sls_contents, expected_in_output):
    """
    verify that a sls structure is NOT a recursive ref
    """
    with pytest.helpers.temp_file("recurse-ok.sls", sls_contents, state_tree):
        ret = state.show_sls("recurse-ok")
        assert expected_in_output in ret


def test_running_dictionary_consistency(state):
    """
    Test the structure of the running dictionary so we don't change it
    without deprecating/documenting the change
    """
    running_dict_fields = {
        "__id__",
        "__run_num__",
        "__sls__",
        "changes",
        "comment",
        "duration",
        "name",
        "result",
        "start_time",
    }

    sls = state.single("test.succeed_without_changes", name="gndn")
    ret_values_set = set(sls.full_return.keys())
    assert running_dict_fields.issubset(ret_values_set)


def test_running_dictionary_key_sls(state, state_tree):
    """
    Ensure the __sls__ key is either null or a string
    """
    sls1 = state.single("test.succeed_with_changes", name="gndn")
    assert "__sls__" in sls1.full_return
    assert sls1.full_return["__sls__"] is None

    sls_contents = """
    gndn:
      test.succeed_with_changes
    """
    with pytest.helpers.temp_file("gndn.sls", sls_contents, state_tree):
        sls2 = state.sls(mods="gndn")
        for state_return in sls2:
            assert "__sls__" in state_return.full_return
            assert isinstance(state_return.full_return["__sls__"], str)


@pytest.fixture
def requested_sls_key(minion_opts, state_tree):
    # Provide a platform-appropriate "requested" SLS file and yield the
    # running-dict key it produces; the req_state cache file is removed on
    # teardown so requests don't leak between tests.
    if not salt.utils.platform.is_windows():
        sls_contents = """
        count_root_dir_contents:
          cmd.run:
            - name: 'ls -a / | wc -l'
        """
        sls_key = "cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run"
    else:
        sls_contents = r"""
        count_root_dir_contents:
          cmd.run:
            - name: 'Get-ChildItem C:\ | Measure-Object | %{$_.Count}'
            - shell: powershell
        """
        sls_key = (
            r"cmd_|-count_root_dir_contents_|-Get-ChildItem C:\ | Measure-Object |"
            r" %{$_.Count}_|-run"
        )
    try:
        with pytest.helpers.temp_file(
            "requested.sls", sls_contents, state_tree
        ) as sls_path:
            yield sls_key
    finally:
        cache_file = os.path.join(minion_opts["cachedir"], "req_state.p")
        if os.path.exists(cache_file):
            os.remove(cache_file)


def test_request(state, requested_sls_key):
    """
    verify sending a state request to the minion(s)
    """
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None


def test_check_request(state, requested_sls_key):
    """
    verify checking a state request sent to the minion(s)
    """
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None

    ret = state.check_request()
    assert ret["default"]["test_run"][requested_sls_key]["result"] is None


def test_clear_request(state, requested_sls_key):
    """
    verify clearing a state request sent to the minion(s)
    """
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None

    ret = state.clear_request()
    assert ret is True


def test_run_request_succeeded(state, requested_sls_key):
    """
    verify running a state request sent to the minion(s)
    """
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None

    ret = state.run_request()
    assert ret[requested_sls_key]["result"] is True


def test_run_request_failed_no_request_staged(state, requested_sls_key):
    """
    verify not running a state request sent to the minion(s)
    """
    ret = state.request("requested")
    assert ret[requested_sls_key]["result"] is None

    ret = state.clear_request()
    assert ret is True

    ret = state.run_request()
    assert ret == {}


def test_issue_1876_syntax_error(state, state_tree, tmp_path):
    """
    verify that we catch the following syntax error::

        /tmp/salttest/issue-1876:

          file:
            - managed
            - source: salt://testfile

          file.append:
            - text: foo
    """
    testfile = tmp_path / "issue-1876.txt"
    sls_contents = """
    {}:
      file:
        - managed
        - source: salt://testfile

      file.append:
        - text: foo
    """.format(
        testfile
    )
    with pytest.helpers.temp_file("issue-1876.sls", sls_contents, state_tree):
        ret = state.sls("issue-1876")
        assert ret.failed
        errmsg = (
            "ID '{}' in SLS 'issue-1876' contains multiple state declarations of the"
            " same type".format(testfile)
        )
        assert errmsg in ret.errors


def test_issue_1879_too_simple_contains_check(state, state_tree, tmp_path):
    # NOTE(review): the filename says "1979" while the test targets issue
    # 1879 — looks like a typo in the original, harmless since the name is
    # only used locally.
    testfile = tmp_path / "issue-1979.txt"
    init_sls_contents = """
    {}:
      file:
        - touch
    """.format(
        testfile
    )
    step1_sls_contents = """
    {}:
      file.append:
        - text: |
            # set variable identifying the chroot you work in (used in the prompt below)
            if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
                debian_chroot=$(cat /etc/debian_chroot)
            fi
    """.format(
        testfile
    )
    step2_sls_contents = """
    {}:
      file.append:
        - text: |
            # enable bash completion in interactive shells
            if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
                . /etc/bash_completion
            fi
    """.format(
        testfile
    )
    expected = textwrap.dedent(
        """\
        # set variable identifying the chroot you work in (used in the prompt below)
        if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
            debian_chroot=$(cat /etc/debian_chroot)
        fi
        # enable bash completion in interactive shells
        if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
            . /etc/bash_completion
        fi
        """
    )

    issue_1879_dir = state_tree / "issue-1879"
    with pytest.helpers.temp_file(
        "init.sls", init_sls_contents, issue_1879_dir
    ), pytest.helpers.temp_file(
        "step-1.sls", step1_sls_contents, issue_1879_dir
    ), pytest.helpers.temp_file(
        "step-2.sls", step2_sls_contents, issue_1879_dir
    ):
        # Create the file
        ret = state.sls("issue-1879")
        for staterun in ret:
            assert staterun.result is True

        # The first append
        ret = state.sls("issue-1879.step-1")
        for staterun in ret:
            assert staterun.result is True

        # The second append
        ret = state.sls("issue-1879.step-2")
        for staterun in ret:
            assert staterun.result is True

        # Does it match?
        contents = testfile.read_text()
        assert contents == expected

        # Make sure we don't re-append existing text
        ret = state.sls("issue-1879.step-1")
        for staterun in ret:
            assert staterun.result is True

        ret = state.sls("issue-1879.step-2")
        for staterun in ret:
            assert staterun.result is True

        # Does it match?
        contents = testfile.read_text()
        assert contents == expected


def test_include(state, state_tree, tmp_path):
    testfile_path = tmp_path / "testfile"
    testfile_path.write_text("foo")
    include_test_path = tmp_path / "include-test.txt"
    to_include_test_path = tmp_path / "to-include-test.txt"
    exclude_test_path = tmp_path / "exclude-test.txt"
    to_include_sls_contents = """
    {}:
      file.managed:
        - source: salt://testfile
    """.format(
        to_include_test_path
    )
    include_sls_contents = """
    include:
      - to-include-test

    {}:
      file.managed:
        - source: salt://testfile
    """.format(
        include_test_path
    )
    with pytest.helpers.temp_file(
        "testfile", "foo", state_tree
    ), pytest.helpers.temp_file(
        "to-include-test.sls", to_include_sls_contents, state_tree
    ), pytest.helpers.temp_file(
        "include-test.sls", include_sls_contents, state_tree
    ):
        ret = state.sls("include-test")
        for staterun in ret:
            assert staterun.result is True
        assert include_test_path.exists()
        assert to_include_test_path.exists()
        assert exclude_test_path.exists() is False


def test_exclude(state, state_tree, tmp_path):
    testfile_path = tmp_path / "testfile"
    testfile_path.write_text("foo")
    include_test_path = tmp_path / "include-test.txt"
    to_include_test_path = tmp_path / "to-include-test.txt"
    exclude_test_path = tmp_path / "exclude-test.txt"
    to_include_sls_contents = """
    {}:
      file.managed:
        - source: salt://testfile
    """.format(
        to_include_test_path
    )
    include_sls_contents = """
    include:
      - to-include-test

    {}:
      file.managed:
        - source: salt://testfile
    """.format(
        include_test_path
    )
    exclude_sls_contents = """
    exclude:
      - to-include-test

    include:
      - include-test

    {}:
      file.managed:
        - source: salt://testfile
    """.format(
        exclude_test_path
    )
    with pytest.helpers.temp_file(
        "testfile", "foo", state_tree
    ), pytest.helpers.temp_file(
        "to-include-test.sls", to_include_sls_contents, state_tree
    ), pytest.helpers.temp_file(
        "include-test.sls", include_sls_contents, state_tree
    ), pytest.helpers.temp_file(
        "exclude-test.sls", exclude_sls_contents, state_tree
    ):
        ret = state.sls("exclude-test")
        for staterun in ret:
            assert staterun.result is True
        assert include_test_path.exists()
        assert exclude_test_path.exists()
        assert to_include_test_path.exists() is False


def test_issue_2068_template_str(state, state_tree):
    template_str_no_dot_sls_contents = """
    required_state:
      test:
        - succeed_without_changes

    requiring_state:
      test:
        - succeed_without_changes
        - require:
          - test: required_state
    """
    template_str_sls_contents = """
    required_state:
      test.succeed_without_changes

    requiring_state:
      test.succeed_without_changes:
        - require:
          - test: required_state
    """
    with pytest.helpers.temp_file(
        "issue-2068-no-dot.sls", template_str_no_dot_sls_contents, state_tree
    ) as template_str_no_dot_path, pytest.helpers.temp_file(
        "issue-2068.sls", template_str_sls_contents, state_tree
    ) as template_str_path:
        # If running this state with state.sls works, so should using
        # state.template_str
        ret = state.sls("issue-2068-no-dot")
        for staterun in ret:
            assert staterun.result is True

        template_str_no_dot_contents = template_str_no_dot_path.read_text()
        ret = state.template_str(template_str_no_dot_contents)
        for staterun in ret:
            assert staterun.result is True

        # Now using state.template
        ret = state.template(str(template_str_no_dot_path))
        for staterun in ret:
            assert staterun.result is True

        # Now the problematic #2068 including dot's
        ret = state.sls("issue-2068")
        for staterun in ret:
            assert staterun.result is True

        template_str_contents = template_str_path.read_text()
        ret = state.template_str(template_str_contents)
        for staterun in ret:
            assert staterun.result is True

        # Now using state.template
        ret = state.template(str(template_str_path))
        for staterun in ret:
            assert staterun.result is True


@pytest.mark.parametrize("item", ("include", "exclude", "extends"))
def test_template_str_invalid_items(state, item):
    TEMPLATE = textwrap.dedent(
        """\
        {}:
          - existing-state

        /tmp/test-template-invalid-items:
          file:
            - managed
            - source: salt://testfile
        """.format(
            item
        )
    )

    # NOTE(review): TEMPLATE is already formatted above, so this second
    # .format(item) is a no-op on a placeholder-free string.
    ret = state.template_str(TEMPLATE.format(item))
    assert ret.failed
    errmsg = (
        "The '{}' declaration found on '<template-str>' is invalid when "
        "rendering single templates".format(item)
    )
    assert errmsg in ret.errors


@pytest.mark.skip_on_windows(
    reason=(
        "Functional testing this on windows raises unicode errors. "
        "Tested in tests/pytests/integration/modules/state/test_state.py"
    )
)
def test_pydsl(state, state_tree, tmp_path):
    """
    Test the basics of the pydsl
    """
    testfile = tmp_path / "testfile"
    sls_contents = """
    #!pydsl

    state("{}").file("touch")
    """.format(
        testfile
    )
    with pytest.helpers.temp_file("pydsl.sls", sls_contents, state_tree):
        ret = state.sls("pydsl")
        for staterun in ret:
            assert staterun.result is True
        assert testfile.exists()


def test_issues_7905_and_8174_sls_syntax_error(state, state_tree):
    """
    Call sls file with yaml syntax error.

    Ensure theses errors are detected and presented to the user without
    stack traces.
    """
    badlist_1_sls_contents = """
    # Missing " " between "-" and "foo" or "name"
    A:
      cmd.run:
        -name: echo foo
        -foo:
          - bar
    """
    badlist_2_sls_contents = """
    # C should fail with bad list error message
    B:
      # ok
      file.exist:
        - name: /foo/bar/foobar

    # ok
    /foo/bar/foobar:
      file.exist

    # nok
    C:
      /foo/bar/foobar:
        file.exist
    """
    with pytest.helpers.temp_file(
        "badlist1.sls", badlist_1_sls_contents, state_tree
    ), pytest.helpers.temp_file("badlist2.sls", badlist_2_sls_contents, state_tree):
        ret = state.sls("badlist1")
        assert ret.failed
        assert ret.errors == ["State 'A' in SLS 'badlist1' is not formed as a list"]

        ret = state.sls("badlist2")
        assert ret.failed
        assert ret.errors == ["State 'C' in SLS 'badlist2' is not formed as a list"]


@pytest.mark.slow_test
def test_retry_option(state, state_tree):
    """
    test the retry option on a simple state with defaults
    ensure comment is as expected
    ensure state duration is greater than configured the passed (interval * attempts)
    """
    sls_contents = """
    file_test:
      file.exists:
        - name: /path/to/a/non-existent/file.txt
        - retry:
            until: True
            attempts: 3
            interval: 1
            splay: 0
    """
    expected_comment = (
        'Attempt 1: Returned a result of "False", with the following '
        'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"'
    )
    with pytest.helpers.temp_file("retry.sls", sls_contents, state_tree):
        ret = state.sls("retry")
        for state_return in ret:
            assert state_return.result is False
            assert expected_comment in state_return.comment
            assert state_return.full_return["duration"] >= 3


def test_retry_option_success(state, state_tree, tmp_path):
    """
    test a state with the retry option that should return True immediately
    (i.e. no retries)
    """
    testfile = tmp_path / "testfile"
    testfile.touch()
    sls_contents = """
    file_test:
      file.exists:
        - name: {}
        - retry:
            until: True
            attempts: 5
            interval: 2
            splay: 0
    """.format(
        testfile
    )
    duration = 4
    if salt.utils.platform.is_windows():
        duration = 16
    with pytest.helpers.temp_file("retry.sls", sls_contents, state_tree):
        ret = state.sls("retry")
        for state_return in ret:
            assert state_return.result is True
            assert state_return.full_return["duration"] < duration
            # It should not take 2 attempts
            assert "Attempt 2" not in state_return.comment


@pytest.mark.slow_test
def test_retry_option_eventual_success(state, state_tree, tmp_path):
    """
    test a state with the retry option that should return True, eventually
    """
    testfile1 = tmp_path / "testfile-1"
    testfile2 = tmp_path / "testfile-2"

    def create_testfile(testfile1, testfile2):
        # Wait for the first state to create testfile1, then create the
        # file the retrying state is polling for.
        while True:
            if testfile1.exists():
                break
        time.sleep(2)
        testfile2.touch()

    thread = threading.Thread(target=create_testfile, args=(testfile1, testfile2))
    sls_contents = """
    file_test_a:
      file.managed:
        - name: {}
        - content: 'a'

    file_test:
      file.exists:
        - name: {}
        - retry:
            until: True
            attempts: 5
            interval: 2
            splay: 0
        - require:
          - file_test_a
    """.format(
        testfile1, testfile2
    )
    with pytest.helpers.temp_file("retry.sls", sls_contents, state_tree):
        thread.start()
        ret = state.sls("retry")
        for state_return in ret:
            assert state_return.result is True
            assert state_return.full_return["duration"] > 4
            # It should not take 5 attempts
            assert "Attempt 5" not in state_return.comment


@pytest.mark.slow_test
def test_state_non_base_environment(state, state_tree_prod, tmp_path):
    """
    test state.sls with saltenv using a nonbase environment
    with a salt source
    """
    testfile = tmp_path / "testfile"
    sls_contents = """
    {}:
      file.managed:
        - content: foo
    """.format(
        testfile
    )
    with pytest.helpers.temp_file("non-base-env.sls", sls_contents, state_tree_prod):
        ret = state.sls("non-base-env", saltenv="prod")
        for state_return in ret:
            assert state_return.result is True
        assert testfile.exists()


@pytest.mark.skip_on_windows(
    reason="Skipped until parallel states can be fixed on Windows"
)
def test_parallel_state_with_long_tag(state, state_tree):
    """
    This tests the case where the state being executed has a long ID dec or
    name and states are being run in parallel. The filenames used for the
    parallel state cache were previously based on the tag for each chunk,
    and longer ID decs or name params can cause the cache file to be longer
    than the operating system's max file name length. To counter this we
    instead generate a SHA1 hash of the chunk's tag to use as the cache
    filename. This test will ensure that long tags don't cause caching
    failures.

    See https://github.com/saltstack/salt/issues/49738 for more info.
    """
    short_command = "helloworld"
    long_command = short_command * 25
    sls_contents = """
    test_cmd_short:
      cmd.run:
        - name: {}
        - parallel: True

    test_cmd_long:
      cmd.run:
        - name: {}
        - parallel: True
    """.format(
        short_command, long_command
    )
    with pytest.helpers.temp_file("issue-49738.sls", sls_contents, state_tree):
        ret = state.sls(
            "issue-49738",
            __pub_jid="1",  # Because these run in parallel we need a fake JID
        )

    comments = sorted([x.comment for x in ret])
    expected = sorted(
        ['Command "{}" run'.format(x) for x in (short_command, long_command)]
    )
    assert comments == expected, "{} != {}".format(comments, expected)


@pytest.mark.skip_on_darwin(reason="Test is broken on macosx")
@pytest.mark.skip_on_windows(
    reason=(
        "Functional testing this on windows raises unicode errors. "
        "Tested in tests/pytests/integration/modules/state/test_state.py"
    )
)
def test_state_sls_unicode_characters(state, state_tree):
    """
    test state.sls when state file contains non-ascii characters
    """
    sls_contents = """
    echo1:
      cmd.run:
        - name: "echo 'This is Æ test!'"
    """
    with pytest.helpers.temp_file("issue-46672.sls", sls_contents, state_tree):
        ret = state.sls("issue-46672")
        expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
        assert expected in ret


def test_state_sls_integer_name(state, state_tree):
    """
    This tests the case where the state file is named only with integers
    """
    sls_contents = """
    always-passes:
      test.succeed_without_changes
    """
    state_id = "test_|-always-passes_|-always-passes_|-succeed_without_changes"
    with pytest.helpers.temp_file("12345.sls", sls_contents, state_tree):
        ret = state.sls("12345")
        assert state_id in ret
        for state_return in ret:
            assert state_return.result is True
            assert "Success!" in state_return.comment

        # Also works when the mod name is passed as an int.
        ret = state.sls(mods=12345)
        assert state_id in ret
        for state_return in ret:
            assert state_return.result is True
            assert "Success!" in state_return.comment


def test_state_sls_lazyloader_allows_recursion(state, state_tree):
    """
    This tests that referencing dunders like __salt__ work
    context: https://github.com/saltstack/salt/pull/51499
    """
    sls_contents = """
    {% if 'nonexistent_module.function' in salt %}
    {% do salt.log.warning("Module is available") %}
    {% endif %}
    always-passes:
      test.succeed_without_changes:
        - name: foo
    """
    state_id = "test_|-always-passes_|-foo_|-succeed_without_changes"
    with pytest.helpers.temp_file("issue-51499.sls", sls_contents, state_tree):
        ret = state.sls("issue-51499")
        assert state_id in ret
        for state_return in ret:
            assert state_return.result is True
            assert "Success!" in state_return.comment
# ---- file boundary: minimizer.py ----
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for other minimizers."""

import copy
import functools
import os
import tempfile
import threading
import time

import errors

# Tuning constants for queue processing, merge batching, and reporting.
DEFAULT_CLEANUP_INTERVAL = 20
DEFAULT_THREAD_COUNT = 8
DEFAULT_TESTS_PER_THREAD = 4
MAX_MERGE_BATCH_SIZE = 32
PROGRESS_REPORT_INTERVAL = 300


class DummyLock(object):
  """Dummy to replace threading.Lock for single-threaded tests.

  Supports the context-manager protocol as a no-op and evaluates as falsy so
  callers can distinguish it from a real lock with a truth test.
  """

  def __enter__(self):
    pass

  def __exit__(self, exc_type, value, traceback):
    pass

  def __bool__(self):
    # Falsy so "if self.lock:" style checks treat this as "no real lock".
    return False

  # Python 2 compatibility alias (__nonzero__ was the py2 spelling of
  # __bool__; defining only __nonzero__ made this object truthy on py3).
  __nonzero__ = __bool__


class TestQueue(object):
  """Queue to store commands that should be executed to test hypotheses."""

  def __init__(self, thread_count, deadline_check=None,
               progress_report_function=None, per_thread_cleanup_function=None):
    """Initialize the queue.

    Args:
      thread_count: Number of worker threads to spawn per processing pass.
      deadline_check: Optional callable; returns True (soft check) or raises
          (hard check) when the minimization deadline has been exceeded.
      progress_report_function: Optional callable invoked periodically while
          waiting for worker threads to finish.
      per_thread_cleanup_function: Optional callable run by each worker after
          every completed test.
    """
    self.thread_count = thread_count
    self.deadline_check = deadline_check
    self.progress_report_function = progress_report_function
    self.per_thread_cleanup_function = per_thread_cleanup_function
    self.lock = threading.Lock()
    self.queue = []

  def _pop(self):
    """Pull a single hypothesis to process from the queue."""
    with self.lock:
      if not self.queue:
        return None
      return self.queue.pop(0)

  def _work(self):
    """Process items from the queue until it is empty."""
    while not self.deadline_check or not self.deadline_check(soft_check=True):
      current_item = self._pop()
      if not current_item:
        break

      test, test_function, completion_callback, should_run = current_item  # pylint: disable=unpacking-non-sequence
      if not should_run():
        continue

      result = test_function(test)
      completion_callback(result)

      if self.per_thread_cleanup_function:
        self.per_thread_cleanup_function()

      # Abort if we have exceeded the deadline for this operation.
      if self.deadline_check and self.deadline_check(soft_check=True):
        break

  def _cleanup(self):
    """Clean up the queue to be sure that no more tasks will be executed."""
    with self.lock:
      self.queue = []

  def push(self, test, test_function, completion_callback,
           should_run=lambda: True):
    """Add a test to the queue and a callback to run on completion."""
    with self.lock:
      self.queue.append((test, test_function, completion_callback, should_run))

  def force(self, test, test_function, completion_callback,
            should_run=lambda: True):
    """Force a test to the front of the queue."""
    entry = (test, test_function, completion_callback, should_run)
    with self.lock:
      self.queue.insert(0, entry)

  def size(self):
    """Return the number of unprocessed tasks in the queue."""
    return len(self.queue)

  def process(self):
    """Process all tests in the queue and block until completion."""
    while self.queue:
      # NOTE: was xrange; range is equivalent here (iterated once) and works
      # on both Python 2 and 3.
      threads = [
          threading.Thread(target=self._work)
          for _ in range(self.thread_count)
      ]
      for thread in threads:
        thread.start()

      while any([thread.is_alive() for thread in threads]):
        if self.deadline_check:
          self.deadline_check(cleanup_function=self._cleanup)
        if self.progress_report_function:
          self.progress_report_function()
        time.sleep(1)


class Testcase(object):
  """Single test case to be minimized."""

  def __init__(self, data, minimizer):
    self.minimizer = minimizer
    if minimizer.tokenize:
      self.tokens = minimizer.tokenizer(data)
    else:
      self.tokens = data
    self.required_tokens = [True] * len(self.tokens)
    self.tested_hypotheses = set()
    self.unmerged_failing_hypotheses = []
    self.tests_to_queue = []
    self.currently_processing = False
    self.last_progress_report_time = 0
    self.runs_since_last_cleanup = 0

    if minimizer.max_threads > 1:
      self.test_queue = TestQueue(
          minimizer.max_threads, deadline_check=self._deadline_exceeded,
          progress_report_function=self._report_progress)
      self.merge_preparation_lock = threading.Lock()
      self.merge_lock = threading.Lock()
      self.cache_lock = threading.Lock()
      self.tests_to_queue_lock = threading.Lock()
    else:
      # Single-threaded mode: no queue, and no-op locks.
      self.test_queue = None
      self.merge_preparation_lock = DummyLock()
      self.merge_lock = DummyLock()
      self.cache_lock = DummyLock()
      self.tests_to_queue_lock = DummyLock()

  def __str__(self):
    """Return the string form of the minimized test case (at this point)."""
    return self.minimizer.token_combiner(self.get_required_tokens())

  # Helper functions based on minimizer configuration.
  def _deadline_exceeded(self, cleanup_function=None, soft_check=False):
    """Check to see if we have exceeded the deadline for execution."""
    if self.minimizer.deadline and time.time() > self.minimizer.deadline:
      if soft_check:
        return True

      # If we are here, we have exceeded the deadline on a hard check. Clean
      # up.
      if cleanup_function:
        cleanup_function()
      if self.minimizer.cleanup_function:
        self.minimizer.cleanup_function()

      # Raise an exception if this is not a soft deadline check.
      raise errors.MinimizationDeadlineExceededError(self)

    return False

  def _delete_file_if_needed(self, input_file):
    """Deletes a temporary file if necessary."""
    # If we are not running in a mode where we need to delete files, do
    # nothing.
    if not self.minimizer.tokenize or not self.minimizer.delete_temp_files:
      return

    try:
      os.remove(input_file)
    except OSError:
      pass

  def _report_progress(self):
    """Call a function to report progress if the minimizer uses one."""
    if not self.minimizer.progress_report_function:
      return
    if time.time() - self.last_progress_report_time < PROGRESS_REPORT_INTERVAL:
      return

    self.last_progress_report_time = time.time()
    message = '%d/%d tokens remaining.' % (len(self.get_required_tokens()),
                                           len(self.required_tokens))
    self.minimizer.progress_report_function(message)

  # Functions used when preparing tests.
  def _range_complement(self, current_range):
    """Return required tokens in the complement of the specified range."""
    # NOTE: was xrange; range is equivalent (only iterated) on py2 and py3.
    result = range(len(self.tokens))
    to_remove = set(current_range)
    return [i for i in result if i not in to_remove and self.required_tokens[i]]

  def _prepare_test_input(self, tokens, tested_tokens):
    """Write the tokens currently being tested to a temporary file."""
    tested_tokens = set(tested_tokens)
    current_tokens = [t for i, t in enumerate(tokens) if i in tested_tokens]
    if not self.minimizer.tokenize:
      return current_tokens

    data = self.minimizer.token_combiner(current_tokens)
    handle = self.minimizer.get_temp_file()
    destination = handle.name
    try:
      handle.write(data)
    except IOError:
      # We may have filled the disk. Try processing tests and writing again.
      self._do_single_pass_process()
      handle.write(data)

    handle.close()
    return destination

  def _get_test_file(self, hypothesis):
    """Return a test file for a hypothesis."""
    complement = self._range_complement(hypothesis)
    return self._prepare_test_input(self.tokens, complement)

  def _push_test_to_queue(self, hypothesis):
    """Add a test for a hypothesis to a queue for processing."""
    test_file = self._get_test_file(hypothesis)
    callback = functools.partial(
        self._handle_completed_test, hypothesis=hypothesis,
        input_file=test_file)
    should_run = functools.partial(self._contains_required_tokens, hypothesis,
                                   test_file)
    self.test_queue.push(
        test_file, self.minimizer.test_function, callback,
        should_run=should_run)

    # Make sure that we do not let too many unprocessed tests build up.
    if self.test_queue.size() >= self.minimizer.batch_size:
      self._do_single_pass_process()

  def prepare_test(self, hypothesis):
    """Prepare the test based on the mode we are running in."""
    # Check the cache to make sure we have not tested this before.
    if self._has_tested(hypothesis):
      return

    # If we are single-threaded, just run and process results immediately.
    if not self.test_queue:
      # In the threaded case, we call the cleanup function before each pass
      # over the queue. It needs to be tracked here for the single-thread case.
      self.runs_since_last_cleanup += 1
      if (self.runs_since_last_cleanup >=
          self.minimizer.single_thread_cleanup_interval and
          self.minimizer.cleanup_function):
        self.minimizer.cleanup_function()

      test_file = self._get_test_file(hypothesis)
      if self._contains_required_tokens(hypothesis, test_file):
        self._handle_completed_test(
            self.minimizer.test_function(test_file), hypothesis, test_file)

      # Check to see if we have exceeded the deadline and report progress.
      self._report_progress()
      self._deadline_exceeded()
      return

    if self.currently_processing:
      # If we are processing, we cannot write more tests or add to the queue.
      with self.tests_to_queue_lock:
        self.tests_to_queue.append(hypothesis)
    else:
      self._push_test_to_queue(hypothesis)

  # Functions used when processing test results.
  def _handle_completed_test(self, test_passed, hypothesis, input_file):
    """Update state based on the test result and hypothesis."""
    # If the test failed, handle the result.
    if not test_passed:
      self._handle_failing_hypothesis(hypothesis)

    # Delete leftover files if necessary.
    self._delete_file_if_needed(input_file)

    # Minimizers may need to do something with the test result.
    self._process_test_result(test_passed, hypothesis)

  def _process_test_result(self, test_passed, hypothesis):
    """Additional processing of the result. Minimizers may override this."""
    pass

  def _handle_failing_hypothesis(self, hypothesis):
    """Update the token list for a failing hypothesis."""
    if not self.test_queue:
      # We aren't multithreaded, so just update the list directly.
      for token in hypothesis:
        self.required_tokens[token] = False
      return

    with self.merge_preparation_lock:
      self.unmerged_failing_hypotheses.append(hypothesis)
      if len(self.unmerged_failing_hypotheses) < MAX_MERGE_BATCH_SIZE:
        return

      hypotheses_to_merge = self.unmerged_failing_hypotheses
      self.unmerged_failing_hypotheses = []

    # We may need to block while the previous batch is merging. If not, the
    # results from this batch could conflict with the results from the
    # previous.
    with self.merge_lock:
      self._attempt_merge(hypotheses_to_merge)

  def _attempt_merge(self, hypotheses, sibling_merge_succeeded=False):
    """Update the required token list if the queued changes don't conflict."""
    # If there's nothing to merge, we're done.
    if not hypotheses:
      return

    aggregate_tokens = set()
    for hypothesis in hypotheses:
      for token in hypothesis:
        aggregate_tokens.add(token)
    aggregate_hypothesis = list(aggregate_tokens)

    if sibling_merge_succeeded:
      # We were able to remove all tokens from the other half of this
      # hypothesis, so we can assume that this would fail without running the
      # test. If this would also pass, there would not have been a conflict
      # while testing this set. Well, this could be a flaky test, but then we
      # have bigger problems.
      test_passed = True
    else:
      complement = self._range_complement(aggregate_hypothesis)
      test_file = self._prepare_test_input(self.tokens, complement)
      test_passed = self.minimizer.test_function(test_file)
      self._delete_file_if_needed(test_file)

    # Failed (crashed), so there was no conflict here.
    if not test_passed:
      for token in aggregate_hypothesis:
        self.required_tokens[token] = False
      return

    # Passed (no crash). We need to try a bit harder to resolve this conflict.
    if len(hypotheses) == 1:
      # We really cannot remove this token. No additional work to be done.
      return

    # NOTE: was true division ("/ 2"), which yields a float slice index on
    # Python 3 and raises TypeError; floor division is identical on Python 2.
    front = hypotheses[:len(hypotheses) // 2]
    back = hypotheses[len(hypotheses) // 2:]

    # If we could remove either one of two hypotheses, favor removing the
    # first.
    front_merged_successfully = self._attempt_merge(front)
    self._attempt_merge(back, sibling_merge_succeeded=front_merged_successfully)

  def _do_single_pass_process(self):
    """Process through a single pass of our test queue."""
    self.currently_processing = True
    self.test_queue.process()

    # If a cleanup function is provided, call it. This is usually used to
    # ensure that all processes are terminated or perform additional cleanup.
    if self.minimizer.cleanup_function:
      self.minimizer.cleanup_function()

    # Push any results generated while this test was running to the queue.
    self.currently_processing = False
    while self.tests_to_queue:
      with self.tests_to_queue_lock:
        hypothesis = self.tests_to_queue.pop(0)

      # This may trigger another round of processing, so don't hold the lock.
      self._push_test_to_queue(hypothesis)

  def process(self):
    """Start a test."""
    if not self.test_queue:
      return

    while self.test_queue.size():
      self._do_single_pass_process()

    with self.merge_preparation_lock:
      hypotheses_to_merge = self.unmerged_failing_hypotheses
      self.unmerged_failing_hypotheses = []

    with self.merge_lock:
      self._attempt_merge(hypotheses_to_merge)

  # Cache functions.
  def _contains_required_tokens(self, hypothesis, test_file):
    """Check to see if this hypothesis contains untested tokens."""
    # It is possible that we could copy this while it is being updated. We do
    # not block in this case because the worst case scenario is that we run an
    # irrelevant test, and blocking is potentially expensive.
    working_required_tokens = copy.copy(self.required_tokens)
    with self.merge_preparation_lock:
      # A deep copy is not required. Hypotheses are not modified after being
      # added to the list for processing.
      unprocessed_hypotheses = copy.copy(self.unmerged_failing_hypotheses)

    for unprocessed_hypothesis in unprocessed_hypotheses:
      for token in unprocessed_hypothesis:
        # For this check, we do not care if the merge would succeed or not
        # since the best case is that we would add the token to the queue as
        # well.
        working_required_tokens[token] = False

    for token in hypothesis:
      if working_required_tokens[token]:
        return True

    # If we aren't going to run this test, this will not have a completion
    # callback. If that happens, we need to clean up now.
    self._delete_file_if_needed(test_file)
    return False

  def _has_tested(self, hypothesis):
    """Check to see if this hypothesis has been tested before."""
    hypothesis_tuple = tuple(hypothesis)
    with self.cache_lock:
      if hypothesis_tuple in self.tested_hypotheses:
        return True
      self.tested_hypotheses.add(hypothesis_tuple)
      return False

  # Result checking functions.
  def get_result(self):
    """Get the result of minimization."""
    if not self.minimizer.tokenize:
      return self.get_required_tokens()
    return str(self)

  def get_required_tokens(self):
    """Return all required tokens for this test case."""
    return [t for i, t in enumerate(self.tokens) if self.required_tokens[i]]

  def get_required_token_indices(self):
    """Get the indices of all remaining required tokens."""
    return [i for i, v in enumerate(self.required_tokens) if v]


def _default_tokenizer(s):
  """Default string tokenizer which splits on newlines."""
  return s.split('\n')


def _default_combiner(tokens):
  """Default token combiner which assumes each token is a line."""
  return '\n'.join(tokens)


class Minimizer(object):
  """Base class for minimizers."""

  def __init__(self, test_function, max_threads=1, tokenizer=_default_tokenizer,
               token_combiner=_default_combiner, tokenize=True,
               cleanup_function=None,
               single_thread_cleanup_interval=DEFAULT_CLEANUP_INTERVAL,
               deadline=None, get_temp_file=None, delete_temp_files=True,
               batch_size=None, progress_report_function=None,
               file_extension=''):
    """Initialize a minimizer.

    A minimizer object can be used multiple times."""
    self.test_function = test_function
    self.max_threads = max_threads
    self.tokenizer = tokenizer
    self.token_combiner = token_combiner
    self.tokenize = tokenize
    self.cleanup_function = cleanup_function
    self.single_thread_cleanup_interval = single_thread_cleanup_interval
    self.deadline = deadline
    self.delete_temp_files = delete_temp_files
    self.progress_report_function = progress_report_function

    if batch_size:
      self.batch_size = batch_size
    else:
      self.batch_size = DEFAULT_TESTS_PER_THREAD * max_threads

    # NOTE: the original assigned self.get_temp_file twice (once before this
    # branch); the earlier assignment was always overwritten and was removed.
    if not get_temp_file:
      self.get_temp_file = functools.partial(
          tempfile.NamedTemporaryFile,
          mode='wb',
          delete=False,
          prefix='min_',
          suffix=file_extension)
    else:
      self.get_temp_file = get_temp_file

  @staticmethod
  def _handle_constructor_argument(key, kwargs, default=None):
    """Cleanup a keyword argument specific to a subclass and get the value."""
    # Equivalent to the original try/del/except KeyError dance.
    return kwargs.pop(key, default)

  def _execute(self, data):
    """Perform minimization on a test case."""
    raise NotImplementedError

  def minimize(self, data):
    """Wrapper to perform common tasks and call |_execute|."""
    try:
      testcase = self._execute(data)
    except errors.MinimizationDeadlineExceededError as error:
      # When a MinimizationDeadlineExceededError is raised, the partially
      # minimized test case is stored with it so that we can recover the work
      # that had been done up to that point.
      testcase = error.testcase
    return testcase.get_result()

  @staticmethod
  def run(data, thread_count=DEFAULT_THREAD_COUNT, file_extension=''):
    """Minimize |data| using this minimizer's default configuration."""
    raise NotImplementedError
# ---- file boundary: DSF_podtk_ctrl.py ----
"""DSF_podtk_ctrl.py - controller for the POD Toolkit Chris R. Coughlin, David S. Forsyth (TRI/Austin, Inc.) """ __author__ = 'Chris R. Coughlin' from models.DSF_podtk_model import DSF_PODData from models import workerthread from controllers import pathfinder from views import dialogs import wx import Queue class DSF_PODWindowController(object): """Controller for the PODWindow UI""" def __init__(self, view): self.view = view self.model = DSF_PODData(self) def get_models(self): """Retrieves the list of PODModels and populates the TreeCtrl""" pod_models = self.model.get_models() for model in pod_models: self.view.modeltree.add_model(model) # Event Handlers def on_import_data(self, evt): """ Handles Import File request""" wildcard = "Excel files (*.xls*)|*.xls*" style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST file_dlg = wx.FileDialog(self.view, 'Import Data', wildcard = wildcard, style = style) if file_dlg.ShowModal() == wx.ID_CANCEL: return filename = file_dlg.GetPath() """ TODO now find out what columns in Sheet1 are size, results maybe even give an error message? """ if self.model.import_data(filename) is False: return # now fill in grid of data, generate chart self.model.plot_input_data(self.view.axes1) self.view.Refresh() def on_export_data(self, evt): """ Handles Export File request""" wildcard = "Excel files (*.xlsx)|*.xlsx" style = wx.FD_SAVE file_dlg = wx.FileDialog(self.view, 'Export Data', wildcard = wildcard, style = style) filename = self.model.get_filename() file_dlg.SetFilename(filename) if file_dlg.ShowModal() == wx.ID_CANCEL: return filename = file_dlg.GetPath() self.model.export_data(filename) def on_quit(self, evt): """Handles Close Window request""" self.view.close() def on_add_model(self, evt): """Handles request to add a model""" # Placeholder for now addmodel_dlg = wx.MessageDialog(self.view, caption="Add A POD Model", message="This feature not yet implemented.\nPlease " \ "contact TRI for assistance." 
, style=wx.OK | wx.ICON_INFORMATION) addmodel_dlg.ShowModal() addmodel_dlg.Destroy() def on_delete_model(self, evt): """Handles request to delete a model""" # Placeholder for now delmodel_dlg = wx.MessageDialog(self.view, caption="Remove A POD Model", message="This feature not yet implemented.\nPlease " \ "contact TRI for assistance." , style=wx.OK | wx.ICON_INFORMATION) delmodel_dlg.ShowModal() delmodel_dlg.Destroy() def on_about(self, evt): """Handles request to show About dialog""" # Placeholder for now about_dlg = wx.MessageDialog(self.view, caption="About PODToolkit", message="This is the Probability Of Detection Toolkit", style=wx.OK | wx.ICON_INFORMATION) about_dlg.ShowModal() about_dlg.Destroy() def on_help(self, evt): """Handles request to show Help information""" # Placeholder for now help_dlg = wx.MessageDialog(self.view, caption="PODToolkit Help", message="This feature not yet implemented.\nPlease contact " \ "TRI for assistance." , style=wx.OK | wx.ICON_INFORMATION) help_dlg.ShowModal() help_dlg.Destroy() def on_selection_change(self, evt): """Handles selection change event in ModelTree - updates ModelProperty Editor""" item = evt.GetItem() if item: self.refresh_mpgrid(item) evt.Skip() def refresh_mpgrid(self, item): """Updates the ModelProperties Grid with the specified ModelTree item.""" selected_obj = self.view.modeltree.GetItemPyData(item) if isinstance(selected_obj, dict): self.view.mp_lbl.SetLabel(self.view.modeltree.selectionParentLabel()) self.view.mp_grid.ClearGrid() props = selected_obj.keys() self.view.mp_grid.SetNumberRows(len(props)) row = 0 for prop in props: self.view.mp_grid.SetCellValue(row, 0, prop) self.view.mp_grid.SetCellValue(row, 1, str(selected_obj.get(prop))) row += 1 def on_modeltree_change(self, evt): """Handles changes in the ModelTree - updates ModelProperty Editor""" self.on_selection_change(evt) def on_right_click_modeltree(self, evt): """Handles right-click event in the ModelTree""" click_pos = evt.GetPosition() item, 
flags = self.view.modeltree.HitTest(click_pos) if item: self.view.modeltree.SelectItem(item) self.view.tree_popup(click_pos) def on_edit_inputdata(self, evt): """Handles request to load input data into worksheet""" input_data = self.view.modeltree.selected_inputdata() if input_data is not None: if input_data['filetype'].lower() == 'csv': try: data = self.model.load_data(input_data['filename']) self.populate_spreadsheet(self.view.input_grid, data) except IOError as err: err_dlg = wx.MessageDialog(self.view, caption="Failed To Read File", message=str(err), style=wx.OK | wx.ICON_ERROR) err_dlg.ShowModal() err_dlg.Destroy() def on_choose_inputdata(self, evt): """Handles request to set input data file""" selected_input_data = self.view.modeltree.GetSelection() if selected_input_data.IsOk(): file_dlg = wx.FileDialog(self.view, message="Please select a CSV file", wildcard="CSV files (*.csv)|*.csv|Text Files (*.txt)|*" \ ".txt|All Files (*.*)|*.*" , style=wx.FD_OPEN) if file_dlg.ShowModal() == wx.ID_OK: inputdata_item = self.view.modeltree.GetItemPyData(selected_input_data) inputdata_item['filename'] = file_dlg.GetPath() self.view.modeltree.SetItemPyData(selected_input_data, inputdata_item) self.view.modeltree.SelectItem(selected_input_data) self.refresh_mpgrid(selected_input_data) def on_sheet_tool_click(self, evt): """Handles toolbar button clicks in the spreadsheet - currently supports Open File (id=20) and Save File (id=30).""" if evt.GetId() == 20: # Open File file_dlg = wx.FileDialog(self.view, message="Please select a CSV file", wildcard="CSV files (*.csv)|*.csv|Text Files (*.txt)|*" \ ".txt|All Files (*.*)|*.*" , style=wx.FD_OPEN) if file_dlg.ShowModal() == wx.ID_OK: try: grid = self.get_active_grid() data = self.model.load_data(file_dlg.GetPath()) if data is not None: self.populate_spreadsheet(grid, data) else: raise IOError("File not recognized as CSV.") except Exception as err: if str(err) is None: msg = "An unknown error occurred attempting to read the file." 
else: msg = "An error occurred attempting to read the file:\n\n{0}".format( str(err)) err_dlg = wx.MessageDialog(self.view, caption="Failed To Read File", message=msg, style=wx.OK | wx.ICON_ERROR) err_dlg.ShowModal() err_dlg.Destroy() elif evt.GetId() == 30: # Save File save_file_dlg = wx.FileDialog(self.view, message="Please specify an output filename", defaultDir=pathfinder.podmodels_path(), wildcard="CSV files (*.csv)|*.csv|Text Files (*.txt)|*" \ ".txt|All Files (*.*)|*.*" , style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) if save_file_dlg.ShowModal() == wx.ID_OK: grid = self.get_active_grid() grid.WriteCSV(save_file_dlg.GetPath()) save_file_dlg.Destroy() def get_active_grid(self): """Returns the currently-selected Spreadsheet control from the view""" grid = None active_page = self.view.spreadsheet_nb.GetSelection() if active_page == 0: grid = self.view.input_grid elif active_page == 1: grid = self.view.output_grid return grid def on_property_change(self, evt): """Handles changes in ModelProperty Editor - ModelTree updated with new values.""" click_pos = evt.GetPosition() item = self.view.mp_grid.HitTest(click_pos) if item: property_name = self.view.mp_grid.GetCellValue(evt.GetRow(), 0) property_value = self.view.mp_grid.GetCellValue(evt.GetRow(), 1) selected_property = self.view.modeltree.GetSelection() if selected_property.IsOk() and selected_property != self.view.modeltree.GetRootItem(): self.view.modeltree.GetItemPyData(selected_property)[property_name] = \ property_value def on_save_model(self, evt): """Handles request to store POD Model configuration changes to disk""" model = self.view.modeltree.get_model() if model is not None: model.save_configuration() def on_runmodel(self, evt): """Handles request to execute current POD Model""" model = self.view.modeltree.get_model() if model is not None: self.run_model(model) def run_model(self, model_instance): """Runs the specified POD Model instance in a separate thread.""" exception_queue = Queue.Queue() model_thd = 
workerthread.WorkerThread(exception_queue=exception_queue, target=self.model.run) model_thd.start() progress_dlg = dialogs.progressDialog(dlg_title="Running POD Model", dlg_msg="Please wait, running POD Model...") while True: model_thd.join(0.125) progress_dlg.update() if not model_thd.is_alive(): try: exc_type, exc = exception_queue.get(block=False) err_msg = "An error occurred while running the POD Model:\n{0}".format(exc) err_dlg = wx.MessageDialog(self.view.parent, message=err_msg, caption="Error In POD Model Execution", style=wx.ICON_ERROR) err_dlg.ShowModal() err_dlg.Destroy() return except Queue.Empty: # No errors occurred, continue processing # NEED TO FIX THIS FOR THE NEW UI model_instance.plot_POD_results(self.view.axes1) if model_instance.data is not None: # Model returned data to display try: self.populate_spreadsheet(self.view.output_grid, model_instance.data) self.view.spreadsheet_nb.ChangeSelection(self.view.output_sheet_page) except MemoryError: # File too large to load err_msg = "The file is too large to load." 
err_dlg = wx.MessageDialog(self.view, message=err_msg, caption="Unable To Preview Data", style=wx.ICON_ERROR) err_dlg.ShowModal() err_dlg.Destroy() if model_instance.results is not None: # Model return output text to display self.view.txtoutput_tc.WriteText(model_instance.results) self.refresh_plots() break finally: progress_dlg.close() wx.GetApp().Yield(True) def refresh_plots(self): """Forces update to the plots (required after some plotting commands)""" self.view.canvas1.draw() self.view.canvas2.draw() def populate_spreadsheet(self, spreadsheet_ctrl, data_array): """Clears the specified wxSpreadSheet instance and fills with the contents of the NumPy data_array.""" spreadsheet_ctrl.ClearGrid() spreadsheet_ctrl.SetNumberRows(0) spreadsheet_ctrl.SetNumberCols(0) rownum = 0 if data_array.ndim == 2: num_rows = data_array.shape[0] for row in range(num_rows): spreadsheet_ctrl.AppendRows(1) numcols = data_array[row].size if spreadsheet_ctrl.GetNumberCols() < numcols: spreadsheet_ctrl.SetNumberCols(numcols) colnum = 0 for cell in data_array[row]: spreadsheet_ctrl.SetCellValue(rownum, colnum, str(cell)) colnum += 1 rownum += 1 elif data_array.ndim == 1: spreadsheet_ctrl.SetNumberCols(1) for el in data_array: spreadsheet_ctrl.AppendRows(1) spreadsheet_ctrl.SetCellValue(rownum, 0, str(el)) rownum += 1
# ---- file boundary: pyminer.py ----
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Ported to Python 3: http.client instead of httplib, int instead of
# long, range instead of xrange, bytes.fromhex()/bytes.hex() instead of
# str.decode('hex')/encode('hex'), and bytes (not str) throughout the
# hashing path. All names, signatures, settings keys and printed
# messages are unchanged.

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import http.client
import sys

from multiprocessing import Process

ERR_SLEEP = 15          # seconds to back off after a failed/empty RPC call
MAX_NONCE = 1000000     # initial per-getwork nonce scan range

settings = {}
pp = pprint.PrettyPrinter(indent=4)


class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node."""

    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header; b64encode operates on bytes in Python 3
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % base64.b64encode(authpair.encode()).decode()
        self.conn = http.client.HTTPConnection(host, port, timeout=30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call; return the result, the error object
        on an RPC-level error, or None on a transport/decode failure."""
        self.OBJID += 1  # note: creates a per-instance id counter
        obj = {'version': '1.1',
               'method': method,
               'id': self.OBJID,
               'params': params if params is not None else []}
        self.conn.request('POST', '/', json.dumps(obj),
                          {'Authorization': self.authhdr,
                           'Content-type': 'application/json'})

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response")
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print("JSON-RPC: cannot JSON-decode body")
            return None
        if 'error' in resp_obj and resp_obj['error'] is not None:
            # mirror the original behavior: hand the error object back
            return resp_obj['error']
        if 'result' not in resp_obj:
            print("JSON-RPC: no result in object")
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        return self.rpc('getwork', data)


def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x & 0xffffffff


def bytereverse(x):
    """Reverse the byte order of a 32-bit integer."""
    return uint32(((x) << 24) |
                  (((x) << 8) & 0x00ff0000) |
                  (((x) >> 8) & 0x0000ff00) |
                  ((x) >> 24))


def bufreverse(in_buf):
    """Reverse the byte order within each 32-bit word of a byte buffer.

    in_buf length is assumed to be a multiple of 4 bytes.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i + 4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return b''.join(out_words)


def wordreverse(in_buf):
    """Reverse the order of the 32-bit words of a byte buffer."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i + 4])
    out_words.reverse()
    return b''.join(out_words)


class Miner:
    """Single-process getwork miner: scan nonces, report solutions."""

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan up to self.max_nonce nonces over the given work.

        datastr/targetstr are hex strings from getwork. Returns
        (hashes_done, nonce_bin) where nonce_bin is the packed winning
        nonce or None if no solution was found in the scanned range.
        """
        # decode work data hex string to binary
        static_data = bytes.fromhex(datastr)
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = bytes.fromhex(targetstr)
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        target = int(targetbin.hex(), 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in range(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != b'\x00\x00\x00\x00':
                continue

            # convert binary hash to 256-bit Python int
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            l = int(hash.hex(), 16)

            # proof-of-work test:  hash < target
            if l < target:
                print(time.asctime(), "PROOF-OF-WORK found: %064x" % (l,))
                return (nonce + 1, nonce_bin)
            else:
                print(time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,))

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.hex()
        # nonce occupies hex chars [152:160] of the 256-char data string
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [solution]
        result = rpc.getwork(param_arr)
        print(time.asctime(), "--> Upstream RPC result:", result)

    def iterate(self, rpc):
        """One getwork round: fetch work, scan, retune, maybe submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'], work['target'])
        time_end = time.time()
        time_diff = time_end - time_start

        # retune the scan range so one round takes about 'scantime' seconds
        self.max_nonce = int((hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffa:
            self.max_nonce = 0xfffffffa

        if settings['hashmeter']:
            print("HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done, (hashes_done / 1000.0) / time_diff))

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)


def miner_thread(id):
    """Process entry point: run one Miner until killed."""
    miner = Miner(id)
    miner.loop()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: pyminer.py CONFIG-FILE")
        sys.exit(1)

    # read simple "key = value" config, ignoring '#' comment lines
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            m = re.search(r'^\s*#', line)
            if m:
                continue

            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8549
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print("Missing username and/or password in cfg file")
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = int(settings['scantime'])

    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print(settings['threads'], "mining threads started")

    print(time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']))
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print(time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port']))
example_stream_buffer_extended.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # File: example_stream_buffer_extended.py # # Part of ‘UNICORN Binance WebSocket API’ # Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api # Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api # PyPI: https://pypi.org/project/unicorn-binance-websocket-api/ # # Author: LUCIT Systems and Development # # Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
from __future__ import print_function
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os

# Route the library's logger plus everything else at DEBUG level into a
# log file named after this script (example_stream_buffer_extended.py.log).
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
logging.basicConfig(level=logging.DEBUG,
                    filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")

# create instance of BinanceWebSocketApiManager
binance_websocket_api_manager = BinanceWebSocketApiManager()

# Trading pairs (lower case) to subscribe to on every stream below.
markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb',
           'bchabctusd', 'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt',
           'chzbnb', 'tusdbnb', 'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 'wrxbnb',
           'poabtc', 'wanbtc', 'ardrbtc', 'icnbtc', 'tusdusdt', 'atombusd', 'nxseth', 'bnbusdt',
           'trxxrp', 'erdpax', 'erdbtc', 'icxbusd', 'nulsbtc', 'hotusdt', 'wavespax', 'zilbnb',
           'arnbtc', 'nulsusdt', 'wintrx', 'npxsbtc', 'busdtry', 'qtumbnb', 'eosbtc', 'xlmpax',
           'tomobnb', 'eosbnb', 'engbtc', 'linketh', 'xrpbtc', 'fetbtc', 'stratusdt', 'navbnb',
           'bcneth', 'yoyobtc', 'nanobnb', 'saltbtc', 'tfuelusdc', 'skybnb', 'fuelbtc', 'bnbusdc',
           'inseth', 'btcpax', 'batbtc', 'rlceth', 'arketh', 'ltcpax', 'ltcbusd', 'duskbtc', 'mftusdt',
           'bntusdt', 'mdabtc', 'enjbtc', 'poabnb', 'nanobusd', 'paxtusd', 'hotbtc', 'bcdbtc',
           'beambnb', 'trxeth', 'omgbnb', 'cdtbtc', 'eosusdc', 'dashbusd', 'cocosbtc', 'dasheth',
           'xrptusd', 'atomtusd', 'rcneth', 'rpxeth', 'xlmusdc', 'aionbusd', 'nxsbtc', 'chateth',
           'repbtc', 'tctusdt', 'linkusdt', 'nasbtc', 'usdsusdc', 'xvgbtc', 'elfeth', 'ctxcbtc',
           'cmteth', 'gnteth', 'usdspax', 'zilbtc', 'batpax', 'stratbtc', 'xzcbtc', 'iotausdt',
           'etcbnb', 'ankrusdt', 'xlmeth', 'loombtc', 'erdusdc', 'rdnbnb', 'icneth', 'vetbtc',
           'cvcusdt', 'ftmpax', 'ethbullusdt', 'edoeth', 'steemeth', 'gobnb', 'hsrbtc', 'ambbtc',
           'bchabcbtc', 'dntbtc', 'btctusd', 'denteth', 'snglsbtc', 'eosbullusdt', 'xlmtusd',
           'tnteth', 'sysbnb', 'renusdt', 'zrxusdt', 'xlmbtc', 'stormbtc', 'ncashbnb', 'omgusdt',
           'troyusdt', 'venbtc', 'modbtc', 'dogepax', 'ontusdc', 'eurbusd', 'tctbnb', 'gxsbtc',
           'celrbnb', 'adausdt', 'beambtc', 'elfbtc', 'celrbtc', 'rvnusdt', 'poaeth', 'wavesusdc',
           'trxbnb', 'trxusdc', 'ethbearusdt', 'ethpax', 'bateth', 'kavabtc', 'paxbtc', 'trigbnb',
           'btcusdc', 'oneusdc', 'xrptry', 'stxusdt', 'strateth', 'lendeth', 'neousdc', 'mithusdt',
           'btcngn', 'blzeth', 'evxeth', 'dnteth', 'grsbtc', 'arneth', 'iotabnb', 'waneth', 'xtzbnb',
           'subeth', 'btsbtc', 'cvceth', 'ethusdc', 'etctusd', 'cloakbtc', 'grseth', 'eospax',
           'cdteth', 'bchusdt', 'lskusdt', 'enjbusd', 'drepbtc', 'manaeth', 'tomousdt', 'algobnb',
           'wtceth', 'linkpax', 'batbnb', 'sceth', 'rvnbusd', 'cvcbnb', 'manabtc', 'gasbtc', 'stxbtc',
           'cloaketh', 'neotusd', 'lrceth', 'thetabtc', 'dogeusdt', 'aionbnb', 'viabtc', 'keyeth',
           'nanoeth', 'ncasheth', 'bgbpusdc', 'ltobnb', 'snmeth', 'adabtc', 'btseth', 'qtumbusd',
           'wtcbnb', 'dcrbtc', 'fttbnb', 'paxbnb', 'insbtc', 'gntbnb', 'etheur', 'dashusdt', 'rcnbtc',
           'btcusdt', 'wanusdt', 'powrbnb', 'xmrbnb', 'trigeth', 'xzceth', 'bchbtc', 'qspbnb', 'scbnb',
           'mcoeth', 'powrbtc', 'algotusd', 'ankrbtc', 'tusdeth', 'keybtc', 'usdcusdt', 'ftmusdc',
           'atombnb', 'zenbtc', 'dockbtc', 'neobtc', 'phbbnb', 'bnbpax', 'brdbnb', 'trxusdt',
           'trxbusd', 'mtlbtc', 'ftmtusd', 'perlusdc', 'mithbnb', 'eosbullbusd', 'reqeth', 'bccbnb',
           'veneth', 'loombnb', 'trxpax', 'usdcpax', 'stormusdt', 'ognbtc', 'gvtbtc', 'iotaeth',
           'naseth', 'drepusdt', 'gvteth', 'wrxusdt', 'bchabcpax', 'ongbtc', 'usdcbnb', 'dgdeth',
           'salteth', 'mtleth', 'bcnbnb', 'neblbnb', 'wanbnb', 'ontusdt', 'npxsusdt', 'mftbtc',
           'eosbearbusd', 'bntbtc', 'gtoeth', 'modeth', 'etcusdc', 'veteth', 'bcptpax', 'atomusdc',
           'duskpax', 'kavabnb', 'lunbtc', 'adxbtc', 'bnteth', 'funbtc', 'knceth', 'dogebtc',
           'bchsvpax', 'bcpttusd', 'osteth', 'oaxeth', 'wabibtc', 'appcbtc', 'qkcbtc', 'nanousdt',
           'wingsbtc', 'hbarusdt', 'eurusdt', 'waveseth', 'asteth', 'linkbusd', 'btttusd', 'zecusdc',
           'bnbusds', 'linkbtc', 'venusdt', 'hotbnb', 'usdtrub', 'tctbtc', 'ankrpax', 'btctry',
           'adabnb', 'polybtc', 'bcceth', 'enjeth', 'bnbbusd', 'repbnb', 'bullusdt', 'vitebtc',
           'btgbtc', 'renbtc', 'thetausdt', 'troybtc', 'dentbtc', 'ostbtc', 'nxsbnb', 'mithbtc',
           'xmrbtc', 'tomobtc', 'nulseth', 'phbbtc', 'duskbnb', 'yoyoeth', 'ontbusd', 'btgeth',
           'etcusdt', 'atomusdt', 'hcbtc', 'brdbtc', 'fttbtc', 'celrusdt', 'lskbnb', 'phbpax',
           'xtzbtc', 'batusdt', 'viteusdt', 'trxbtc', 'bchtusd', 'xtzusdt', 'ftmbtc', 'enjbnb',
           'arkbtc', 'wavesusdt', 'ftmusdt', 'neobusd', 'stormbnb', 'luneth', 'gntbtc', 'gtousdt',
           'chzusdt', 'sntbtc', 'bandbnb', 'hoteth', 'wingseth', 'mcobtc', 'docketh', 'drepbnb',
           'eosusdt', 'eostusd', 'npxseth', 'thetaeth', 'iotxbtc', 'phxbnb', 'enjusdt', 'tfuelbnb',
           'mcobnb', 'ontpax', 'dcrbnb', 'batusdc', 'snglseth', 'qlcbtc', 'qspeth', 'cndeth',
           'appcbnb', 'wprbtc', 'sysbtc', 'iostusdt', 'btceur', 'mtlusdt', 'ethrub', 'tfuelpax',
           'maticusdt', 'ftmbnb', 'xrpbusd', 'iotxusdt', 'tusdbtusd', 'trigbtc', 'atombtc', 'bchpax',
           'eosbusd', 'zileth', 'gtotusd', 'xrpbullusdt', 'onetusd', 'algobtc', 'bchsvusdt', 'gtopax',
           'etceth', 'vibebtc', 'bttusdt', 'repeth', 'iostbnb', 'usdttry', 'btsbnb', 'ankrbnb',
           'dltbnb', 'snteth', 'linktusd', 'nknusdt', 'rpxbtc', 'rdneth', 'cocosusdt', 'etcbusd',
           'btttrx', 'bandbtc', 'steembnb', 'zecpax', 'viabnb', 'cosbnb', 'mtheth', 'xrpusdc',
           'xemeth', 'pivxbnb', 'phxbtc', 'zilusdt', 'poeeth', 'bnbeur', 'bandusdt', 'vetbnb',
           'lendbtc', 'xlmbnb', 'duskusdt', 'mfteth', 'funusdt', 'adabusd', 'perlbnb', 'btcbusd',
           'ltobtc', 'nasbnb', 'algousdt', 'zeneth', 'bchsvusdc', 'mcousdt', 'venbnb', 'hceth',
           'fetusdt', 'edobtc', 'mftbnb', 'cosusdt', 'arpausdt', 'xmrusdt', 'ctxcusdt', 'bqxbtc',
           'npxsusdc', 'icxbnb', 'bchbnb', 'phbusdc', 'tomousdc', 'nulsbnb', 'rcnbnb', 'arpabnb',
           'qtumbtc', 'keyusdt', 'agibtc', 'mblbtc', 'eoseth', 'tusdbtc', 'aioneth', 'storjbtc',
           'lsketh', 'bchsvbtc', 'bntbusd', 'ncashbtc', 'mblbnb', 'polybnb', 'aebnb', 'ltceth',
           'dogeusdc', 'wpreth', 'syseth', 'bcnbtc', 'ognusdt', 'nanobtc', 'astbtc', 'zrxeth',
           'adxeth', 'gxseth', 'ethbearbusd', 'onepax', 'scbtc', 'icxbtc', 'ontbnb', 'qlceth',
           'btsbusd', 'rlcbtc', 'chatbtc', 'wabibnb', 'renbnb', 'xrpbullbusd', 'wavesbtc', 'funeth',
           'rlcbnb', 'phxeth', 'winbtc', 'storjeth', 'wavesbusd', 'iostbtc', 'icxeth', 'adatusd',
           'nknbnb', 'btcrub', 'pivxbtc', 'perlusdt', 'bullbusd', 'bttusdc', 'bcptbtc', 'aebtc',
           'ethusdt', 'ltousdt', 'subbtc', 'thetabnb', 'blzbtc', 'tfuelusdt', 'evxbtc', 'hbarbtc',
           'ambeth', 'winusdt', 'qtumeth', 'dgdbtc', 'adaeth', 'busdusdt', 'xrpbnb', 'adapax',
           'usdsbusds', 'cocosbnb', 'navbtc', 'rvnbtc', 'tnbbtc', 'bnbbtc', 'neopax', 'bearusdt',
           'usdstusd', 'snmbtc', 'rvnbnb', 'gtobnb', 'phbtusd', 'hcusdt', 'btcusds', 'reqbtc',
           'ognbnb', 'lrcbtc', 'xrpeth', 'loometh', 'zectusd', 'vibeeth', 'gobtc', 'bnbtry',
           'bcdeth', 'qkceth', 'neoeth', 'paxusdt', 'bchsvtusd', 'fetbnb', 'yoyobnb', 'xlmbusd',
           'skyeth', 'paxeth', 'ltcbtc', 'xvgeth', 'tnbeth', 'stratbusd', 'agieth', 'xlmusdt',
           'lskbtc', 'bearbusd', 'hsreth', 'ctxcbnb', 'oaxbtc', 'qspbtc', 'iotxeth', 'qlcbnb',
           'algousdc', 'etcpax', 'fueleth', 'aionusdt', 'xmreth', 'maticbtc', 'dashbnb', 'oneusdt',
           'brdeth', 'viaeth', 'omgeth', 'ankrtusd', 'usdsusdt', 'ethtusd', 'wavestusd', 'iosteth',
           'cmtbnb', 'ostbnb', 'ltcusdt', 'ethtry', 'zrxbtc', 'bchabcusdt', 'onebnb', 'beamusdt',
           'nebleth', 'bcptbnb', 'adxbnb', 'ontbtc', 'bttbnb', 'dockusdt', 'bccbtc', 'omgbtc',
           'algopax', 'neousdt', 'xrprub', 'busdngn', 'appceth', 'dentusdt', 'xzcbnb', 'tfueltusd',
           'xembnb', 'arpabtc', 'ankrusdc', 'adausdc', 'kmdeth', 'troybnb', 'bnbeth', 'ltcusdc',
           'databtc', 'blzbnb', 'naveth', 'btcbbtc', 'battusd', 'bnbngn', 'bchbusd', 'busdrub',
           'ltctusd', 'vetbusd', 'ongbnb', 'fttusdt', 'bccusdt', 'ongusdt', 'engeth', 'usdctusd',
           'etcbtc', 'gtousdc', 'mdaeth', 'vitebnb', 'erdusdt', 'dltbtc', 'bnbtusd', 'wtcbtc',
           'xrpusdt', 'xrpeur', 'agibnb', 'trxtusd', 'ethbullbusd', 'iotabtc', 'xembtc',
           'bchabcusdc', 'duskusdc', 'xrppax', 'mblusdt', 'kmdbtc', 'neblbtc', 'maticbnb', 'bnbrub',
           'bcpteth', 'bttbtc', 'stxbnb', 'dlteth', 'onteth', 'vetusdt', 'ppteth', 'ethbtc',
           'onebtc', 'ethbusd', 'zecbtc', 'erdbnb', 'xrpbearusdt', 'stratbnb', 'cmtbtc', 'cvcbtc',
           'kncbtc', 'rpxbnb', 'zenbnb', 'cndbnb', 'ardrbnb', 'bchabcbusd', 'ltcbnb', 'pivxeth',
           'skybtc', 'tntbtc', 'poebtc', 'steembtc', 'icxusdt', 'tfuelbtc', 'chzbtc', 'vibeth',
           'winusdc', 'gtobtc', 'linkusdc', 'batbusd', 'rdnbtc', 'dataeth', 'bttpax', 'zrxbnb',
           'vibbtc', 'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth',
           'aionbtc', 'aeeth', 'mthbtc', 'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth',
           'qtumusdt']

# Stream channels to open; each channel becomes one websocket stream over
# all markets above, writing into its own named stream_buffer.
channels = ['kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w',
            'trade', 'miniTicker', 'depth20']

for channel in channels:
    binance_websocket_api_manager.create_stream(channel, markets, stream_buffer_name=channel)


def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop: drain the 'trade' stream_buffer and print each record.

    Only the buffer named "trade" is consumed here; the other channels'
    buffers keep filling and are not read by this example. The parameter
    deliberately receives (and shadows) the module-level manager instance.
    """
    print("print trades only")
    # give the streams a moment to connect before consuming
    time.sleep(10)
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer("trade")
        # NOTE(review): the library appears to return False (not None) when the
        # buffer is empty — confirm against the installed library version.
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            try:
                print(oldest_stream_data_from_stream_buffer)
            except Exception:
                # not able to process the data? write it back to the stream_buffer
                binance_websocket_api_manager.add_to_stream_buffer(oldest_stream_data_from_stream_buffer)


# start a worker thread that moves received stream_data from the
# stream_buffer to the print function above
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer,
                                 args=(binance_websocket_api_manager,))
worker_thread.start()
example6.py
import threading
import time
from timeit import default_timer as timer


def _announce_and_grab(worker, label, lock, pause):
    """Print the wait/acquire messages for one lock, take it, then 'work'."""
    print(f'Thread {worker} waiting to acquire lock {label}.')
    lock.acquire()
    print(f'Thread {worker} has acquired lock {label}, performing some calculation...')
    time.sleep(pause)


def thread_a():
    """Worker A: takes lock A then lock B (2 s of 'work' after each)."""
    print('Thread A is starting...')
    _announce_and_grab('A', 'A', lock_a, 2)
    _announce_and_grab('A', 'B', lock_b, 2)
    print('Thread A releasing both locks.')
    lock_a.release()
    lock_b.release()


def thread_b():
    """Worker B: takes lock A then lock B (5 s of 'work' after each)."""
    print('Thread B is starting...')
    _announce_and_grab('B', 'A', lock_a, 5)
    _announce_and_grab('B', 'B', lock_b, 5)
    print('Thread B releasing both locks.')
    lock_b.release()
    lock_a.release()


lock_a = threading.Lock()
lock_b = threading.Lock()

# The workers are run strictly one after the other (each is joined before
# the next starts), so both always see free locks and no deadlock occurs.
workers = [threading.Thread(target=thread_a), threading.Thread(target=thread_b)]

start = timer()
for worker in workers:
    worker.start()
    worker.join()

print('Took %.2f seconds.' % (timer() - start))
print('Finished.')
custom.py
# pylint: disable=too-many-lines # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import io import json import os import os.path import platform import re import ssl import stat import subprocess import sys import tempfile import threading import time import uuid import base64 import webbrowser import zipfile from distutils.version import StrictVersion from math import isnan from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error import requests from knack.log import get_logger from knack.util import CLIError from knack.prompting import prompt_pass, NoTTYException import yaml # pylint: disable=import-error from dateutil.relativedelta import relativedelta # pylint: disable=import-error from dateutil.parser import parse # pylint: disable=import-error from msrestazure.azure_exceptions import CloudError import colorama # pylint: disable=import-error from tabulate import tabulate # pylint: disable=import-error from azure.cli.core.api import get_config_dir from azure.cli.core.azclierror import ManualInterrupt, InvalidArgumentValueError, UnclassifiedUserFault, CLIInternalError, FileOperationError, ClientRequestError, DeploymentError, ValidationError, ArgumentUsageError, MutuallyExclusiveArgumentError, RequiredArgumentMissingError, ResourceNotFoundError from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, 
sdk_no_wait from azure.cli.core.commands import LongRunningOperation from azure.cli.core._profile import Profile from azure.graphrbac.models import (ApplicationCreateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters) from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW from .vendored_sdks.azure_mgmt_preview_aks.v2021_09_01.models import (ContainerServiceLinuxProfile, ManagedClusterWindowsProfile, ContainerServiceNetworkProfile, ManagedClusterServicePrincipalProfile, ContainerServiceSshConfiguration, ContainerServiceSshPublicKey, ManagedCluster, ManagedClusterAADProfile, ManagedClusterAddonProfile, ManagedClusterAgentPoolProfile, AgentPool, AgentPoolUpgradeSettings, ContainerServiceStorageProfileTypes, ManagedClusterIdentity, ManagedClusterAPIServerAccessProfile, ManagedClusterSKU, ManagedServiceIdentityUserAssignedIdentitiesValue, ManagedClusterAutoUpgradeProfile, KubeletConfig, LinuxOSConfig, ManagedClusterHTTPProxyConfig, SysctlConfig, ManagedClusterPodIdentityProfile, ManagedClusterPodIdentity, ManagedClusterPodIdentityException, UserAssignedIdentity, WindowsGmsaProfile, PowerState, Snapshot, CreationData) from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import get_msi_client from ._client_factory import cf_resources from ._client_factory import get_resource_by_name from ._client_factory import cf_container_registry_service from ._client_factory import cf_storage from ._client_factory import cf_agent_pools from ._client_factory import cf_snapshots from ._client_factory import cf_snapshots_client from ._resourcegroup import get_rg_location from ._roleassignments import add_role_assignment, create_role_assignment, build_role_scope, resolve_role_id, \ resolve_object_id from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, 
_set_outbound_type, _parse_comma_separated_list, _trim_fqdn_name_containing_hcp) from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided, update_load_balancer_profile, create_load_balancer_profile) from ._natgateway import (create_nat_gateway_profile, update_nat_gateway_profile, is_nat_gateway_profile_provided) from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME from ._consts import CONST_MONITORING_ADDON_NAME from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID from ._consts import CONST_MONITORING_USING_AAD_MSI_AUTH from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME from ._consts import CONST_AZURE_POLICY_ADDON_NAME from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE from ._consts import CONST_SCALE_DOWN_MODE_DELETE from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED, CONST_ROTATION_POLL_INTERVAL from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE from ._consts import ADDONS, ADDONS_DESCRIPTIONS from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal from .addonconfiguration import update_addons, enable_addons, ensure_default_log_analytics_workspace_for_monitoring, \ 
sanitize_loganalytics_ws_resource_id, ensure_container_insights_for_monitoring, add_monitoring_role_assignment, \ add_ingress_appgw_addon_role_assignment, add_virtual_node_role_assignment logger = get_logger(__name__) def which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. """ for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. """ t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'): try: # added in python 2.7.13 and 3.6 return ssl.SSLContext(ssl.PROTOCOL_TLS) except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) 
try: create_service_principal( cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0) logger.info('Waiting for AAD role to delete') for x in range(0, 10): hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0) try: delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope) break except CLIError as ex: raise ex except CloudError as ex: logger.info(ex) time.sleep(delay + delay * x) else: return False hook.add(message='AAD role deletion done', value=1.0, total_val=1.0) logger.info('AAD role deletion done') return True def _get_default_dns_prefix(name, resource_group_name, subscription_id): # Use subscription id to provide uniqueness and prevent DNS name clashes name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10] if not name_part[0].isalpha(): name_part = (str('a') + name_part)[0:10] resource_group_part = re.sub( '[^A-Za-z0-9-]', '', resource_group_name)[0:16] return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6]) # pylint: disable=too-many-locals def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): obj = {} if client_secret: obj['client_secret'] = client_secret if service_principal: obj['service_principal'] = service_principal config_path = os.path.join(get_config_dir(), file_name) full_config = 
load_service_principals(config_path=config_path) if not full_config: full_config = {} full_config[subscription_id] = obj with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'w+') as spFile: json.dump(full_config, spFile) def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'): config_path = os.path.join(get_config_dir(), file_name) config = load_service_principals(config_path) if not config: return None return config.get(subscription_id) def load_service_principals(config_path): if not os.path.exists(config_path): return None fd = os.open(config_path, os.O_RDONLY) try: with os.fdopen(fd) as f: return shell_safe_json_parse(f.read()) except: # pylint: disable=bare-except return None def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type, key_usage=key_usage, start_date=start_date, end_date=end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds) try: return client.create(app_create_param) except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): if password and key_value: raise CLIError( 'specify either --password or --key-value, but not both.') if not start_date: start_date = datetime.datetime.utcnow() elif isinstance(start_date, str): start_date = parse(start_date) if not end_date: end_date = start_date + relativedelta(years=1) elif isinstance(end_date, str): end_date = parse(end_date) key_type = key_type or 'AsymmetricX509Cert' key_usage = key_usage or 'Verify' password_creds = None key_creds = None if password: password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)] elif key_value: key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value, key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)] return (password_creds, key_creds) def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None): if rbac_client is None: rbac_client = get_graph_rbac_management_client(cli_ctx) if resolve_app: try: uuid.UUID(identifier) result = list(rbac_client.applications.list( filter="appId eq '{}'".format(identifier))) except ValueError: result = list(rbac_client.applications.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: # assume we get an object id result = [rbac_client.applications.get(identifier)] app_id = result[0].app_id else: app_id = identifier return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True)) def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None, scope=None, include_inherited=False, yes=None): factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions ids = ids or [] if ids: if assignee or role or 
resource_group_name or scope or include_inherited: raise CLIError( 'When assignment ids are used, other parameter values are not required') for i in ids: assignments_client.delete_by_id(i) return if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]): from knack.prompting import prompt_y_n msg = 'This will delete all role assignments under the subscription. Are you sure?' if not prompt_y_n(msg, default="n"): return scope = build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client, scope, assignee, role, include_inherited, include_groups=False) if assignments: for a in assignments: assignments_client.delete_by_id(a.id) def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0) logger.info('Waiting for AAD role to delete') for x in range(0, 10): hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0) try: delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope) break except CLIError as ex: raise ex except CloudError as ex: logger.info(ex) time.sleep(delay + delay * x) else: return False hook.add(message='AAD role deletion done', value=1.0, total_val=1.0) logger.info('AAD role deletion done') return True def _search_role_assignments(cli_ctx, assignments_client, definitions_client, scope, assignee, role, include_inherited, include_groups): assignee_object_id = None if assignee: assignee_object_id = resolve_object_id(cli_ctx, assignee) # always use "scope" if provided, so we can get assignments beyond subscription e.g. 
management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            # Include assignments granted through the principal's groups.
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())

    if assignments:
        # Keep an assignment when no scope filter is set, when inherited
        # scopes are allowed and the assignment's scope prefixes the target
        # scope, or when the scopes match exactly (case-insensitive).
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]

        if role:
            role_id = resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]

        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]

    return assignments


def _get_role_property(obj, property_name):
    """Read *property_name* from either a dict-shaped or model-shaped assignment."""
    if isinstance(obj, dict):
        return obj[property_name]
    return getattr(obj, property_name)


def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor assignment already exists at *scope*."""
    # Well-known role definition id of the built-in "Network Contributor" role.
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"

    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments

    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
            return True
    return False


# Matches a user-assigned managed identity ARM resource id and captures
# (subscription id, resource group, identity name).
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)


def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch a user-assigned managed identity from its ARM resource id.

    Raises CLIError when the id cannot be parsed or the identity is missing.
    """
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        msi_client = get_msi_client(cli_ctx, subscription_id)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("Identity {} not found.".format(resource_id))
            raise CLIError(ex.message)
        return identity
    raise CLIError(
        "Cannot parse identity name from provided resource id {}.".format(resource_id))


def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the clientId of the identity at *resource_id*."""
    return _get_user_assigned_identity(cli_ctx, resource_id).client_id


def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    """Return the principal (object) id of the identity at *resource_id*."""
    return _get_user_assigned_identity(cli_ctx, resource_id).principal_id


def _update_dict(dict1, dict2):
    """Return a new dict: *dict1* shallow-copied, then overlaid with *dict2*."""
    cp = dict1.copy()
    cp.update(dict2)
    return cp


# Matches a containerservice snapshot ARM resource id and captures
# (subscription id, resource group, snapshot name).
_re_snapshot_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
    flags=re.IGNORECASE)


def _get_snapshot(cli_ctx, snapshot_id):
    """Fetch a nodepool snapshot from its ARM resource id.

    Raises InvalidArgumentValueError when the id cannot be parsed or the
    snapshot does not exist.
    """
    snapshot_id = snapshot_id.lower()
    match = _re_snapshot_resource_id.search(snapshot_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        snapshot_name = match.group(3)
        snapshot_client = cf_snapshots_client(cli_ctx, subscription_id=subscription_id)
        try:
            snapshot = snapshot_client.get(resource_group_name, snapshot_name)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise InvalidArgumentValueError("Snapshot {} not found.".format(snapshot_id))
            raise CLIError(ex.message)
        return snapshot
    raise InvalidArgumentValueError(
        "Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))


def aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
):
    """Open the Kubernetes dashboard by delegating to the core CLI implementation."""
    from azure.cli.command_modules.acs.custom import _aks_browse

    return _aks_browse(
        cmd,
        client,
        resource_group_name,
        name,
        disable_browser,
        listen_address,
        listen_port,
        CUSTOM_MGMT_AKS_PREVIEW,
    )


def _trim_nodepoolname(nodepool_name):
    """Default to "nodepool1" when empty; otherwise truncate to 12 characters."""
    if not nodepool_name:
        return "nodepool1"
    return nodepool_name[:12]


def 
aks_maintenanceconfiguration_list( cmd, client, resource_group_name, cluster_name ): return client.list_by_managed_cluster(resource_group_name, cluster_name) def aks_maintenanceconfiguration_show( cmd, client, resource_group_name, cluster_name, config_name ): logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ', resource_group_name, cluster_name, config_name) return client.get(resource_group_name, cluster_name, config_name) def aks_maintenanceconfiguration_delete( cmd, client, resource_group_name, cluster_name, config_name ): logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ', resource_group_name, cluster_name, config_name) return client.delete(resource_group_name, cluster_name, config_name) def aks_maintenanceconfiguration_add( cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour ): configs = client.list_by_managed_cluster(resource_group_name, cluster_name) for config in configs: if config.name == config_name: raise CLIError("Maintenance configuration '{}' already exists, please try a different name, " "use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name)) return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour) def aks_maintenanceconfiguration_update( cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour ): configs = client.list_by_managed_cluster(resource_group_name, cluster_name) found = False for config in configs: if config.name == config_name: found = True break if not found: raise CLIError("Maintenance configuration '{}' doesn't exist." 
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name)) return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour) # pylint: disable=unused-argument,too-many-locals def aks_create(cmd, client, resource_group_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", windows_admin_username=None, windows_admin_password=None, enable_ahub=False, kubernetes_version='', node_vm_size=None, node_osdisk_type=None, node_osdisk_size=0, node_osdisk_diskencryptionset_id=None, node_count=3, nodepool_name="nodepool1", nodepool_tags=None, nodepool_labels=None, service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, enable_vmss=None, vm_set_type=None, skip_subnet_role_assignment=False, os_sku=None, enable_fips_image=False, enable_cluster_autoscaler=False, cluster_autoscaler_profile=None, network_plugin=None, network_policy=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, load_balancer_sku=None, load_balancer_managed_outbound_ip_count=None, load_balancer_outbound_ips=None, load_balancer_outbound_ip_prefixes=None, load_balancer_outbound_ports=None, load_balancer_idle_timeout=None, nat_gateway_managed_outbound_ip_count=None, nat_gateway_idle_timeout=None, outbound_type=None, enable_addons=None, workspace_resource_id=None, enable_msi_auth_for_monitoring=False, min_count=None, max_count=None, vnet_subnet_id=None, pod_subnet_id=None, ppg=None, max_pods=0, aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, node_zones=None, enable_node_public_ip=False, node_public_ip_prefix_id=None, generate_ssh_keys=False, # pylint: disable=unused-argument enable_pod_security_policy=False, node_resource_group=None, uptime_sla=False, attach_acr=None, enable_private_cluster=False, 
private_dns_zone=None,
               enable_managed_identity=True,
               fqdn_subdomain=None,
               disable_public_fqdn=False,
               api_server_authorized_ip_ranges=None,
               aks_custom_headers=None,
               appgw_name=None,
               appgw_subnet_prefix=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               enable_sgxquotehelper=False,
               kubelet_config=None,
               linux_os_config=None,
               http_proxy_config=None,
               assign_identity=None,
               auto_upgrade_channel=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               enable_encryption_at_host=False,
               enable_ultra_ssd=False,
               edge_zone=None,
               enable_secret_rotation=False,
               rotation_poll_interval=None,
               disable_local_accounts=False,
               no_wait=False,
               assign_kubelet_identity=None,
               workload_runtime=None,
               gpu_instance_profile=None,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None,
               snapshot_id=None,
               yes=False):
    """Create a managed cluster via the AKSPreviewCreateDecorator pipeline.

    All parameters are captured with locals() and handed to the decorator,
    which builds the ManagedCluster profile and issues the PUT request.
    """
    # get all the original parameters and save them as a dictionary
    raw_parameters = locals()

    # decorator pattern
    from .decorator import AKSPreviewCreateDecorator
    from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
    aks_create_decorator = AKSPreviewCreateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=CUSTOM_MGMT_AKS_PREVIEW,
    )
    try:
        # construct mc profile
        mc = aks_create_decorator.construct_preview_mc_profile()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to create a real managed cluster
    return aks_create_decorator.create_mc(mc)


def aks_update(cmd,  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None,
               max_count=None,
               no_wait=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               nat_gateway_managed_outbound_ip_count=None,
               nat_gateway_idle_timeout=None,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False,
               attach_acr=None,
               detach_acr=None,
               uptime_sla=False,
               no_uptime_sla=False,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               aks_custom_headers=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               disable_pod_identity=False,
               enable_secret_rotation=False,
               disable_secret_rotation=False,
               rotation_poll_interval=None,
               disable_local_accounts=False,
               enable_local_accounts=False,
               enable_public_fqdn=False,
               disable_public_fqdn=False,
               yes=False,
               tags=None,
               nodepool_labels=None,
               windows_admin_password=None,
               enable_azure_rbac=False,
               disable_azure_rbac=False,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None):
    """Update settings of an existing managed cluster in place.

    Fetches the cluster, mutates the requested profile sections, then PUTs
    the modified instance back.
    """
    # Figure out which groups of settings the caller asked to change; these
    # flags drive both the no-op guard below and the individual update steps.
    update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
    update_acr = attach_acr is not None or detach_acr is not None
    update_pod_security = enable_pod_security_policy or disable_pod_security_policy
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_natgw_profile = is_nat_gateway_profile_provided(nat_gateway_managed_outbound_ip_count,
                                                           nat_gateway_idle_timeout)
    update_aad_profile = not (
        aad_tenant_id is None and
        aad_admin_group_object_ids is None and
        not enable_azure_rbac and
        not disable_azure_rbac)
    # pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile \
       and
api_server_authorized_ip_ranges is None and \ not update_pod_security and \ not update_lb_profile and \ not update_natgw_profile and \ not uptime_sla and \ not no_uptime_sla and \ not enable_aad and \ not update_aad_profile and \ not enable_ahub and \ not disable_ahub and \ not auto_upgrade_channel and \ not enable_managed_identity and \ not assign_identity and \ not enable_pod_identity and \ not disable_pod_identity and \ not enable_secret_rotation and \ not disable_secret_rotation and \ not rotation_poll_interval and \ not tags and \ not windows_admin_password and \ not enable_local_accounts and \ not disable_local_accounts and \ not enable_public_fqdn and \ not disable_public_fqdn and \ not enable_windows_gmsa and \ not nodepool_labels: raise CLIError('Please specify "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' '"--update-cluster-autoscaler" or ' '"--cluster-autoscaler-profile" or ' '"--enable-pod-security-policy" or ' '"--disable-pod-security-policy" or ' '"--api-server-authorized-ip-ranges" or ' '"--attach-acr" or ' '"--detach-acr" or ' '"--uptime-sla" or ' '"--no-uptime-sla" or ' '"--load-balancer-managed-outbound-ip-count" or ' '"--load-balancer-outbound-ips" or ' '"--load-balancer-outbound-ip-prefixes" or ' '"--nat-gateway-managed-outbound-ip-count" or ' '"--nat-gateway-idle-timeout" or ' '"--enable-aad" or ' '"--aad-tenant-id" or ' '"--aad-admin-group-object-ids" or ' '"--enable-ahub" or ' '"--disable-ahub" or ' '"--enable-managed-identity" or ' '"--enable-pod-identity" or ' '"--disable-pod-identity" or ' '"--auto-upgrade-channel" or ' '"--enable-secret-rotation" or ' '"--disable-secret-rotation" or ' '"--rotation-poll-interval" or ' '"--tags" or ' '"--windows-admin-password" or ' '"--enable-azure-rbac" or ' '"--disable-azure-rbac" or ' '"--enable-local-accounts" or ' '"--disable-local-accounts" or ' '"--enable-public-fqdn" or ' '"--disable-public-fqdn"' '"--enble-windows-gmsa" or ' '"--nodepool-labels"') instance = 
client.get(resource_group_name, name)

    _fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)

    # Per-cluster autoscaler flags only make sense for single-pool clusters;
    # multi-pool clusters must use "az aks nodepool" instead.
    if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # Already enabled: warn and bail out instead of silently re-applying.
            logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already disabled for this managed cluster.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None

    # if intention is to clear profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        # CLI-style "key-with-dashes" settings map onto snake_case model fields.
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile

    if enable_pod_security_policy and disable_pod_security_policy:
        raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
                       'at the same time.')

    if enable_pod_security_policy:
        instance.enable_pod_security_policy = True

    if disable_pod_security_policy:
        instance.enable_pod_security_policy = False

    if disable_local_accounts and enable_local_accounts:
        raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
                       'at the same time.')

    if disable_local_accounts:
        instance.disable_local_accounts = True

    if enable_local_accounts:
        instance.disable_local_accounts = False

    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)

    if update_natgw_profile:
        from azext_aks_preview.decorator import AKSPreviewModels
        # store all the models used by nat gateway
        nat_gateway_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).nat_gateway_models
        instance.network_profile.nat_gateway_profile = update_nat_gateway_profile(
            nat_gateway_managed_outbound_ip_count,
            nat_gateway_idle_timeout,
            instance.network_profile.nat_gateway_profile,
            models=nat_gateway_models,
        )

    if attach_acr and detach_acr:
        raise CLIError(
            'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')

    if uptime_sla and no_uptime_sla:
        raise CLIError(
            'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same 
time.')

    # Uptime SLA is modelled as the "Paid" tier of the Basic SKU.
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )

    subscription_id = get_subscription_id(cmd.cli_ctx)
    # ACR grant/revoke needs the principal that kubelet runs as: the kubelet
    # managed identity for MSI clusters, the service principal otherwise.
    client_id = ""
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id

    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')

    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)

    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)

    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(
                api_server_authorized_ip_ranges, instance)

    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError(
                'Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )

    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            # ids -> i_ds due to track 2 naming issue
            instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
                aad_admin_group_object_ids)
        if enable_azure_rbac and disable_azure_rbac:
            raise CLIError(
                'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
        if enable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = True
        if disable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = False

    if enable_ahub and disable_ahub:
        raise CLIError(
            'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')

    # Azure Hybrid User Benefits toggles the Windows license type.
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'

    if enable_public_fqdn and disable_public_fqdn:
        raise MutuallyExclusiveArgumentError(
            'Cannot specify "--enable-public-fqdn" and "--disable-public-fqdn" at the same time')
    is_private_cluster = instance.api_server_access_profile is not None and instance.api_server_access_profile.enable_private_cluster
    if enable_public_fqdn:
        if not is_private_cluster:
            raise ArgumentUsageError('--enable-public-fqdn can only be used for private cluster')
        instance.api_server_access_profile.enable_private_cluster_public_fqdn = True
    if disable_public_fqdn:
        if not is_private_cluster:
            raise ArgumentUsageError('--disable-public-fqdn can only be used for private cluster')
        if instance.api_server_access_profile.private_dns_zone.lower() == CONST_PRIVATE_DNS_ZONE_NONE:
            raise ArgumentUsageError('--disable-public-fqdn cannot be applied for none mode private dns zone cluster')
        instance.api_server_access_profile.enable_private_cluster_public_fqdn = False

    if auto_upgrade_channel is not None:
        if instance.auto_upgrade_profile is None:
            instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()
        instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel

    if not enable_managed_identity and assign_identity:
        raise CLIError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')

    current_identity_type = "spn"
    if instance.identity
is not None:
        current_identity_type = instance.identity.type.casefold()

    # Work out the identity type the caller wants; default is "keep as-is".
    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"

    if current_identity_type != goal_identity_type:
        from knack.prompting import prompt_y_n
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
                   'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
                   'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
                   'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
        # Migrating the identity type is disruptive — confirm unless --yes.
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            user_assigned_identity = {
                assign_identity: ManagedServiceIdentityUserAssignedIdentitiesValue()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )

    if enable_pod_identity:
        if not _is_pod_identity_addon_enabled(instance):
            # we only rebuild the pod identity profile if it's disabled before
            _update_addon_pod_identity(
                instance, enable=True,
                allow_kubenet_consent=enable_pod_identity_with_kubenet,
            )

    if disable_pod_identity:
        _update_addon_pod_identity(instance, enable=False)

    # Snapshot which addons are currently enabled; some later steps (secret
    # rotation) and the final PUT depend on these flags.
    azure_keyvault_secrets_provider_addon_profile = None
    monitoring_addon_enabled = False
    ingress_appgw_addon_enabled = False
    virtual_node_addon_enabled = False
    if instance.addon_profiles is not None:
        azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(
            CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
        azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
        monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
        ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
        virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
            instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled

    # NOTE(review): azure_keyvault_secrets_provider_enabled is only bound when
    # addon_profiles is not None; the checks below would raise NameError on a
    # cluster with no addon profiles — confirm whether that can occur.
    if enable_secret_rotation:
        if not azure_keyvault_secrets_provider_enabled:
            raise ArgumentUsageError(
                '--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
        azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"

    if disable_secret_rotation:
        if not azure_keyvault_secrets_provider_enabled:
            raise ArgumentUsageError(
                '--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
        azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"

    if rotation_poll_interval is not None:
        if not azure_keyvault_secrets_provider_enabled:
            raise ArgumentUsageError(
                '--rotation-poll-interval can only be specified when azure-keyvault-secrets-provider is enabled')
        azure_keyvault_secrets_provider_addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval

    if tags:
        instance.tags = tags

    if nodepool_labels is not None:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.node_labels = nodepool_labels

    if windows_admin_password:
        instance.windows_profile.admin_password = windows_admin_password

    if enable_windows_gmsa:
        instance.windows_profile.gmsa_profile = 
WindowsGmsaProfile(enabled=True)
        if gmsa_dns_server is not None and gmsa_root_domain_name is not None:
            instance.windows_profile.gmsa_profile.dns_server = gmsa_dns_server
            instance.windows_profile.gmsa_profile.root_domain_name = gmsa_root_domain_name
        elif gmsa_dns_server is None and gmsa_root_domain_name is None:
            # Neither gMSA DNS setting given: warn that the vnet DNS must be
            # pre-configured, and confirm unless --yes.
            msg = ('Please assure that you have set the DNS server in the vnet used by the cluster when not specifying --gmsa-dns-server and --gmsa-root-domain-name')
            from knack.prompting import prompt_y_n
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        else:
            raise ArgumentUsageError(
                'You must set or not set --gmsa-dns-server and --gmsa-root-domain-name at the same time.')
    else:
        if gmsa_dns_server is not None or gmsa_root_domain_name is not None:
            raise ArgumentUsageError(
                'You only can set --gmsa-dns-server and --gmsa-root-domain-name when setting --enable-windows-gmsa.')

    headers = get_aks_custom_headers(aks_custom_headers)
    return _put_managed_cluster_ensuring_permission(cmd,
                                                    client,
                                                    subscription_id,
                                                    resource_group_name,
                                                    name,
                                                    instance,
                                                    monitoring_addon_enabled,
                                                    ingress_appgw_addon_enabled,
                                                    virtual_node_addon_enabled,
                                                    False,
                                                    instance.agent_pool_profiles[0].vnet_subnet_id,
                                                    _is_msi_cluster(instance),
                                                    attach_acr,
                                                    headers,
                                                    no_wait)


# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
    """Show a managed cluster, with often-null fields stripped for readability."""
    mc = client.get(resource_group_name, name)
    return _remove_nulls([mc])[0]


def _remove_nulls(managed_clusters):
    """
    Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
    doesn't contain distracting null fields.

    This works around a quirk of the SDK for python behavior. These fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
    """
    attrs = ['tags']
    ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
    sp_attrs = ['secret']
    for managed_cluster in managed_clusters:
        for attr in attrs:
            if getattr(managed_cluster, attr, None) is None:
                delattr(managed_cluster, attr)
        if managed_cluster.agent_pool_profiles is not None:
            for ap_profile in managed_cluster.agent_pool_profiles:
                for attr in ap_attrs:
                    if getattr(ap_profile, attr, None) is None:
                        delattr(ap_profile, attr)
        for attr in sp_attrs:
            if getattr(managed_cluster.service_principal_profile, attr, None) is None:
                delattr(managed_cluster.service_principal_profile, attr)
    return managed_clusters


def aks_get_credentials(cmd,  # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        admin=False,
                        user='clusterUser',
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False,
                        context_name=None,
                        public_fqdn=False):
    """Fetch cluster credentials and merge the kubeconfig into *path*."""
    credentialResults = None
    serverType = None
    if public_fqdn:
        serverType = 'public'
    if admin:
        credentialResults = client.list_cluster_admin_credentials(
            resource_group_name, name, serverType)
    else:
        if user.lower() == 'clusteruser':
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name, serverType)
        elif user.lower() == 'clustermonitoringuser':
            credentialResults = client.list_cluster_monitoring_user_credentials(
                resource_group_name, name, serverType)
        else:
            raise CLIError("The user is invalid.")
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")

    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")


# pylint: disable=line-too-long
def aks_kollect(cmd,  # pylint: disable=too-many-statements,too-many-locals
                client,
                resource_group_name,
                name,
                storage_account=None,
                sas_token=None,
                container_logs=None,
                kube_objects=None,
                node_logs=None):
    """Deploy aks-periscope to the cluster and upload diagnostics to storage."""
    colorama.init()

    mc = 
client.get(resource_group_name, name)

    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    # Resolve the target storage account: explicit argument first, otherwise
    # fall back to the cluster's diagnostic settings.
    storage_account_id = None
    if storage_account is None:
        print("No storage account specified. Try getting storage account from diagnostic settings")
        storage_account_id = get_storage_account_from_diag_settings(
            cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError(
                "A storage account must be specified, since there isn't one in the diagnostic settings.")

    from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
    if storage_account_id is None:
        # A bare account name was given — expand it to a full resource id in
        # the cluster's resource group.
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage',
                type='storageAccounts',
                name=storage_account
            )
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    # Without a caller-provided SAS token, mint one from the account keys:
    # a read/write token for periscope uploads plus a read-only token that is
    # shown to the user afterwards.
    readonly_sas_token = None
    if sas_token is None:
        storage_client = cf_storage(
            cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
                                                                         storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = cloud_storage_account_service_factory(
            cmd.cli_ctx, kwargs)
        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        readonly_sas_token = readonly_sas_token.strip('?')

    from knack.prompting import prompt_y_n

    print()
    print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
          f'save them to the storage account '
          f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
          f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print('If you share access to that storage account to Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=True, path=temp_kubeconfig_path)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    # Derive the blob container name from the cluster FQDN, truncated at the
    # "-hcp-" marker and capped at the storage container name limit.
    # https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
    maxContainerNameLength = 63
    fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
    normalized_container_name = fqdn.replace('.', '-')
    # NOTE(review): str.index raises ValueError when "-hcp-" is absent and
    # never returns -1, so the fallback branch below looks unreachable —
    # str.find may have been intended; confirm before changing.
    len_of_container_name = normalized_container_name.index("-hcp-")
    if len_of_container_name == -1:
        len_of_container_name = maxContainerNameLength
    container_name = normalized_container_name[:len_of_container_name]

    sas_token = sas_token.strip('?')
    # Fill the placeholders in the bundled periscope deployment YAML; the SAS
    # key is injected base64-encoded.
    deployment_yaml = _read_periscope_yaml()
    deployment_yaml = deployment_yaml.replace("# <accountName, string>", storage_account_name)
    deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
                                              (base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
    deployment_yaml = deployment_yaml.replace("# <containerName, string>", container_name)

    # Append the optional diagnostic selectors to their marker lines.
    yaml_lines = deployment_yaml.splitlines()
    for index, line in enumerate(yaml_lines):
        if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
            yaml_lines[index] = line + ' ' + container_logs
        if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
            yaml_lines[index] = line + ' ' + kube_objects
        if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
            yaml_lines[index] = line + ' ' + node_logs
    deployment_yaml = '\n'.join(yaml_lines)

    fd, temp_yaml_path = tempfile.mkstemp()
    temp_yaml_file = os.fdopen(fd, 'w+t')
    try:
        temp_yaml_file.write(deployment_yaml)
        temp_yaml_file.flush()
        temp_yaml_file.close()
        try:
            print()
            print("Cleaning up aks-periscope resources if existing")

            # Best-effort teardown of any previous periscope deployment
            # before applying a fresh one.
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "serviceaccount,configmap,daemonset,secret",
                             "--all", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "ClusterRoleBinding",
                             "aks-periscope-role-binding", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "ClusterRoleBinding",
                             "aks-periscope-role-binding-view", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "ClusterRole",
                             "aks-periscope-role", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "--all",
                             "apd", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.DEVNULL)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path,
                             "delete", "CustomResourceDefinition",
                             "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f", temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError as err: raise CLIError(err.output) finally: os.remove(temp_yaml_path) print() token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \ f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}" print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}') print() print(f'You can download Azure Storage Explorer here ' f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}' f' to check the logs by adding the storage account using the following URL:') print(f'{format_hyperlink(log_storage_account_url)}') print() if not prompt_y_n('Do you want to see analysis results now?', default="n"): print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' " f"anytime to check the analysis results.") else: display_diagnostics_report(temp_kubeconfig_path) def _read_periscope_yaml(): curr_dir = os.path.dirname(os.path.realpath(__file__)) periscope_yaml_file = os.path.join(curr_dir, "deploymentyaml", "aks-periscope.yaml") yaml_file = open(periscope_yaml_file, "r") data_loaded = yaml_file.read() return data_loaded def aks_kanalyze(cmd, client, resource_group_name, name): colorama.init() client.get(resource_group_name, name) _, temp_kubeconfig_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path) display_diagnostics_report(temp_kubeconfig_path) def aks_scale(cmd, # pylint: disable=unused-argument client, resource_group_name, name, node_count, nodepool_name="", no_wait=False): instance = client.get(resource_group_name, name) 
    # (continuation of aks_scale)
    _fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)

    # Ambiguous target: more than one pool and no pool named explicitly.
    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            if agent_profile.enable_auto_scaling:
                raise CLIError("Cannot scale cluster autoscaler enabled node pool.")

            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)

    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))


def aks_upgrade(cmd,  # pylint: disable=unused-argument, too-many-return-statements
                client,
                resource_group_name,
                name,
                kubernetes_version='',
                control_plane_only=False,
                no_wait=False,
                node_image_only=False,
                aks_custom_headers=None,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or its node images.

    With --node-image-only, every node pool is image-upgraded one at a time
    (the service only supports one pool per call). Otherwise the control plane
    is upgraded, and node pools too unless --control-plane-only is given (not
    supported on legacy / availability-set clusters). Prompts before each
    destructive path unless `yes` is set.
    """
    from knack.prompting import prompt_y_n

    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    instance = client.get(resource_group_name, name)
    _fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)

    # Availability-set clusters cannot take node-image-only upgrades and always
    # upgrade node pools together with the control plane.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while. Do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            # Blocking call (no_wait=True here means "do not poll" per helper contract below).
            _upgrade_single_nodepool_image_version(True, agent_pool_client,
                                                   resource_group_name, name, agent_pool_profile.name, None)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
            agent_profile.creation_data = None

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    headers = get_aks_custom_headers(aks_custom_headers)

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)


def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name,
                                           cluster_name, nodepool_name, snapshot_id=None):
    """Kick off a node-image upgrade for one node pool, optionally from a snapshot."""
    headers = {}
    if snapshot_id:
        # Service reads the source snapshot from this custom header.
        headers["AKSSnapshotId"] = snapshot_id

    return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version,
                       resource_group_name, cluster_name, nodepool_name, headers=headers)


def _handle_addons_args(cmd,  # pylint: disable=too-many-statements
                        addons_str,
                        subscription_id,
                        resource_group_name,
                        addon_profiles=None,
                        workspace_resource_id=None,
                        enable_msi_auth_for_monitoring=False,
                        appgw_name=None,
                        appgw_subnet_prefix=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        enable_secret_rotation=False,
                        rotation_poll_interval=None,):
    """Translate a comma-separated addon list into ManagedClusterAddonProfile entries.

    Mutates/extends `addon_profiles` (a dict keyed by addon constant) and
    raises CLIError on unrecognized addon names or missing required options.
    (Body continues on the following lines.)
    """
    if not addon_profiles:
        addon_profiles = {}

    addons = 
addons_str.split(',') if addons_str else []

    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')

    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')

    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)

        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
                    CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
        addons.remove('monitoring')
    # workspace only makes sense together with the monitoring addon
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')

    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')

    if 'gitops' in addons:
        addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('gitops')

    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        # NOTE(review): appgw_subnet_prefix and appgw_subnet_cidr both write the
        # same config key; when both are given, appgw_subnet_cidr wins because
        # it is assigned second — presumably prefix is a legacy alias, confirm.
        if appgw_subnet_prefix is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')

    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')

    if 'azure-keyvault-secrets-provider' in addons:
        # Defaults: rotation disabled, 2-minute poll; flags below override.
        addon_profile = ManagedClusterAddonProfile(enabled=True,
                                                   config={CONST_SECRET_ROTATION_ENABLED: "false",
                                                           CONST_ROTATION_POLL_INTERVAL: "2m"})
        if enable_secret_rotation:
            addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
        if rotation_poll_interval is not None:
            addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
        addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
        addons.remove('azure-keyvault-secrets-provider')

    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')

    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')

    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles


def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Load, validate, or create the service principal used by the cluster.

    Resolution order: explicit --service-principal, then the cached
    aksServicePrincipal.json on disk, then a freshly created SP. The result is
    cached back to disk and returned. (Body continues on the following lines.)
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # Random salt keeps the SP's homepage URL unique per creation.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            if dns_name_prefix:
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            else:
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, fqdn_subdomain, location)

            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # Persist whatever we resolved so subsequent invocations can reuse it.
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)


def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate autoscaler flags and write min/max/enable onto the pool profile.

    Raises CLIError when the flag combination is inconsistent or node_count is
    outside [min_count, max_count]. Mutates agent_pool_profile in place.
    """
    if enable_cluster_autoscaler:
        if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count')
        if int(node_count) < int(min_count) or int(node_count) > int(max_count):
            raise CLIError('node-count is not in the range of min-count and max-count')
        agent_pool_profile.min_count = int(min_count)
        agent_pool_profile.max_count = int(max_count)
        agent_pool_profile.enable_auto_scaling = True
    else:
        if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')


def _create_client_secret():
    """Generate a random client secret for a new service principal."""
    # Add a special character to satisfy AAD SP secret requirements
    special_char = '$'
    client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
    return client_secret


def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,  # pylint: disable=unused-argument
                    detach=False):
    """Grant (or revoke, when detach=True) AcrPull on an ACR for the cluster identity.

    Accepts either a full ACR resource ID or a bare registry name (resolved
    across all resource groups).
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
        return

    # Check if the ACR exists by name accross all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
    return


def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create (or delete, when detach=True) the AcrPull role assignment on a registry."""
    if detach:
        if not _delete_role_assignments(cli_ctx, 'acrpull', client_id, scope=registry_id):
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return

    if not add_role_assignment(cli_ctx, 'acrpull', client_id, scope=registry_id):
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
    return


def aks_agentpool_show(cmd,  # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Return one agent pool of a managed cluster."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    return instance


def aks_agentpool_list(cmd,  # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """Return all agent pools of a managed cluster."""
    return client.list(resource_group_name, cluster_name)


def aks_agentpool_add(cmd,  # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type=None,
                      os_sku=None,
                      enable_fips_image=False,
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      workload_runtime=None,
                      gpu_instance_profile=None,
                      snapshot_id=None,
                      no_wait=False):
    """Add a new agent pool to a managed cluster.

    When snapshot_id is given, unspecified version/OS/VM-size fields are filled
    from the snapshot. (Body continues on the following lines.)
    """
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    creationData = None
    if snapshot_id:
        snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
        # Snapshot values only fill in fields the caller left unset.
        if not kubernetes_version:
            kubernetes_version = snapshot.kubernetes_version
        if not os_type:
            os_type = snapshot.os_type
        if not os_sku:
            os_sku = snapshot.os_sku
        if not node_vm_size:
            node_vm_size = snapshot.vm_size
        creationData = CreationData(
            source_resource_id=snapshot_id
        )

    if not os_type:
        os_type = "Linux"

    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    # Per-OS default VM size when neither the caller nor a snapshot set one.
    if node_vm_size is None:
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        os_sku=os_sku,
        enable_fips=enable_fips_image,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        scale_set_priority=priority,
        scale_down_mode=scale_down_mode,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode,
        workload_runtime=workload_runtime,
        gpu_instance_profile=gpu_instance_profile,
        creation_data=creationData
    )

    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        # NaN means "not provided"; -1 tells the service to cap at on-demand price.
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)

    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)


def aks_agentpool_scale(cmd,  # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale an agent pool to a new node count (rejected on autoscaled pools)."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    new_node_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if new_node_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = new_node_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance)


def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None,
                          aks_custom_headers=None,
                          snapshot_id=None):
    """Upgrade one agent pool's Kubernetes version or (with --node-image-only) its node image.

    (Body continues on the following lines.)
    """
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version.'
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name,
                                                      cluster_name, nodepool_name, snapshot_id)

    creationData = None
    if snapshot_id:
        snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
        # Snapshot supplies the target version when the caller gave none.
        if not kubernetes_version and not node_image_only:
            kubernetes_version = snapshot.kubernetes_version
        creationData = CreationData(
            source_resource_id=snapshot_id
        )

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    instance.creation_data = creationData

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance, headers=headers)


def aks_agentpool_get_upgrade_profile(cmd,  # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the available upgrade versions for one agent pool."""
    return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)


def aks_agentpool_update(cmd,  # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         scale_down_mode=None,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         labels=None,
                         no_wait=False):
    """Update mutable agent-pool settings (autoscaler, tags, mode, surge, labels).

    Exactly one of the three autoscaler switches may be set; alternatively any
    of tags/scale-down-mode/mode/max-surge/labels must be provided.
    """
    # Booleans sum as ints: exactly one autoscaler action may be requested.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler

    if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and not labels):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already on: warn and bail out without touching the pool.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    # NOTE(review): tags is assigned unconditionally — passing no --tags resets
    # existing tags to None. Looks intentional but worth confirming.
    instance.tags = tags
    if scale_down_mode is not None:
        instance.scale_down_mode = scale_down_mode
    if mode is not None:
        instance.mode = mode
    if labels is not None:
        instance.node_labels = labels

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance)


def aks_agentpool_stop(cmd,  # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
nodepool_name, aks_custom_headers=None, no_wait=False): agentpool_exists = False instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name.lower() == nodepool_name.lower(): agentpool_exists = True break if not agentpool_exists: raise InvalidArgumentValueError( "Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name)) instance = client.get(resource_group_name, cluster_name, nodepool_name) power_state = PowerState(code="Stopped") instance.power_state = power_state headers = get_aks_custom_headers(aks_custom_headers) return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers) def aks_agentpool_start(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name, aks_custom_headers=None, no_wait=False): agentpool_exists = False instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name.lower() == nodepool_name.lower(): agentpool_exists = True break if not agentpool_exists: raise InvalidArgumentValueError( "Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name)) instance = client.get(resource_group_name, cluster_name, nodepool_name) power_state = PowerState(code="Running") instance.power_state = power_state headers = get_aks_custom_headers(aks_custom_headers) return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers) def aks_agentpool_delete(cmd, # pylint: disable=unused-argument client, resource_group_name, cluster_name, nodepool_name, no_wait=False): agentpool_exists = False instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name.lower() == nodepool_name.lower(): agentpool_exists = True break if not 
agentpool_exists: raise CLIError("Node pool {} doesnt exist, " "use 'aks nodepool list' to get current node pool list".format(nodepool_name)) return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name) def aks_addon_list_available(): available_addons = [] for k, v in ADDONS.items(): available_addons.append({ "name": k, "description": ADDONS_DESCRIPTIONS[v] }) return available_addons def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument addon_profiles = client.get(resource_group_name, name).addon_profiles current_addons = [] for name, addon in ADDONS.items(): if not addon_profiles or addon not in addon_profiles: current_addons.append({ "name": name, "api_key": addon, "enabled": False }) else: current_addons.append({ "name": name, "api_key": addon, "enabled": addon_profiles[addon].enabled }) return current_addons def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument addon_profiles = client.get(resource_group_name, name).addon_profiles addon_key = ADDONS[addon] if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled: raise CLIError(f'Addon "{addon}" is not enabled in this cluster.') return { "name": addon, "api_key": addon_key, "config": addon_profiles[addon_key].config, "identity": addon_profiles[addon_key].identity } def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False): return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, 
                         appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id,
                         appgw_subnet_id=appgw_subnet_id,
                         appgw_watch_namespace=appgw_watch_namespace,
                         enable_sgxquotehelper=enable_sgxquotehelper,
                         enable_secret_rotation=enable_secret_rotation,
                         rotation_poll_interval=rotation_poll_interval,
                         no_wait=no_wait,
                         enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)


def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
    """Disable a single addon; thin wrapper over aks_disable_addons."""
    return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)


def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
                     subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None,
                     appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None,
                     enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
                     no_wait=False, enable_msi_auth_for_monitoring=False):
    """Update the configuration of an already-enabled addon.

    Verifies the addon is enabled, then re-applies it via enable_addons with
    check_enabled=False so the existing profile is overwritten.
    """
    addon_profiles = client.get(resource_group_name, name).addon_profiles
    addon_key = ADDONS[addon]

    if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
        raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')

    return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
                         workspace_resource_id=workspace_resource_id,
                         subnet_name=subnet_name, appgw_name=appgw_name,
                         appgw_subnet_prefix=appgw_subnet_prefix,
                         appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id,
                         appgw_subnet_id=appgw_subnet_id,
                         appgw_watch_namespace=appgw_watch_namespace,
                         enable_sgxquotehelper=enable_sgxquotehelper,
                         enable_secret_rotation=enable_secret_rotation,
                         rotation_poll_interval=rotation_poll_interval,
                         no_wait=no_wait,
                         enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)


def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given addon(s) on a managed cluster.

    For MSI-auth monitoring, first tears down the data-collection-rule
    association so the DCR can be deleted. (Body continues on the following lines.)
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)

    try:
        if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in
instance.addon_profiles and \ instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \ CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \ str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true': # remove the DCR association because otherwise the DCR can't be deleted ensure_container_insights_for_monitoring( cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, remove_monitoring=True, aad_route=True, create_dcr=False, create_dcra=True ) except TypeError: pass instance = _update_addons( cmd, instance, subscription_id, resource_group_name, name, addons, enable=False, no_wait=no_wait ) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance) def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False): instance = client.get(resource_group_name, name) msi_auth = True if instance.service_principal_profile.client_id == "msi" else False # this is overwritten by _update_addons(), so the value needs to be recorded here subscription_id = get_subscription_id(cmd.cli_ctx) instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True, workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, 
# NOTE(review): continuation of the _update_addons(...) call opened on the previous line.
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              enable_secret_rotation=enable_secret_rotation,
                              rotation_poll_interval=rotation_poll_interval,
                              no_wait=no_wait)

    # Monitoring addon: wire up Container Insights before persisting the cluster.
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # MSI-auth route requires a managed-identity cluster (msi_auth recorded above,
            # before _update_addons overwrote the instance).
            if not msi_auth:
                raise ArgumentUsageError(
                    "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                ensure_container_insights_for_monitoring(cmd,
                                                         instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                         subscription_id,
                                                         resource_group_name,
                                                         name,
                                                         instance.location,
                                                         aad_route=True,
                                                         create_dcr=True,
                                                         create_dcra=True)
        else:
            # monitoring addon will use legacy path
            ensure_container_insights_for_monitoring(cmd,
                                                     instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                     subscription_id,
                                                     resource_group_name,
                                                     name,
                                                     instance.location,
                                                     aad_route=False)

    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True

    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
else: result = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance) return result def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name) def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements instance, subscription_id, resource_group_name, name, addons, enable, workspace_resource_id=None, enable_msi_auth_for_monitoring=False, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, disable_secret_rotation=False, rotation_poll_interval=None, no_wait=False): # pylint: disable=unused-argument # parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} os_type = 'Linux' # for each addons argument for addon_arg in addon_args: if addon_arg not in ADDONS: raise CLIError("Invalid addon name: {}.".format(addon_arg)) addon = ADDONS[addon_arg] if addon == CONST_VIRTUAL_NODE_ADDON_NAME: # only linux is supported for now, in the future this will be a user flag addon += os_type # honor addon names defined in Azure CLI for key in list(addon_profiles): if key.lower() == addon.lower() and key != addon: addon_profiles[addon] = addon_profiles.pop(key) if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get( addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == CONST_MONITORING_ADDON_NAME: logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before 
enabling it again.') if not workspace_resource_id: workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id) addon_profile.config = {logAnalyticsConstName: workspace_resource_id} addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type): if addon_profile.enabled: raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n' 'To change virtual-node configuration, run ' '"az aks disable-addons -a virtual-node -g {resource_group_name}" ' 'before enabling it again.') if not subnet_name: raise CLIError( 'The aci-connector addon requires setting a subnet name.') addon_profile.config = { CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name} elif addon == CONST_INGRESS_APPGW_ADDON_NAME: if addon_profile.enabled: raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n' 'To change ingress-appgw configuration, run ' f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" ' 'before enabling it again.') addon_profile = ManagedClusterAddonProfile( enabled=True, config={}) if appgw_name is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name if appgw_subnet_prefix is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix if appgw_subnet_cidr is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr if appgw_id is not None: addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id if appgw_subnet_id is not None: addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id if appgw_watch_namespace is not None: addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME: if 
addon_profile.enabled: raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n' 'To change open-service-mesh configuration, run ' f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" ' 'before enabling it again.') addon_profile = ManagedClusterAddonProfile( enabled=True, config={}) elif addon == CONST_CONFCOM_ADDON_NAME: if addon_profile.enabled: raise CLIError('The confcom addon is already enabled for this managed cluster.\n' 'To change confcom configuration, run ' f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" ' 'before enabling it again.') addon_profile = ManagedClusterAddonProfile( enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"}) if enable_sgxquotehelper: addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true" elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME: if addon_profile.enabled: raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n' 'To change azure-keyvault-secrets-provider configuration, run ' f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" ' 'before enabling it again.') addon_profile = ManagedClusterAddonProfile( enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"}) if enable_secret_rotation: addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true" if disable_secret_rotation: addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false" if rotation_poll_interval is not None: addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile addon_profiles[addon] = addon_profile else: if addon not in addon_profiles: if addon == CONST_KUBE_DASHBOARD_ADDON_NAME: addon_profiles[addon] = ManagedClusterAddonProfile( enabled=False) else: raise CLIError( "The addon {} is not 
installed.".format(addon)) addon_profiles[addon].config = None addon_profiles[addon].enabled = enable instance.addon_profiles = addon_profiles # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return instance def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument return client.list_orchestrators(location, resource_type='managedClusters') def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument return client.get_os_options(location, resource_type='managedClusters') def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". """ # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations( path, temp_path, overwrite_existing, context_name) except yaml.YAMLError as ex: logger.warning( 'Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _handle_merge(existing, addition, key, replace): if not addition[key]: return if existing[key] is None: existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: from knack.prompting import prompt_y_n msg = 'A different object named {} already 
exists in your kubeconfig file.\nOverwrite?' overwrite = False try: overwrite = prompt_y_n(msg.format(i['name'])) except NoTTYException: pass if overwrite: existing[key].remove(j) else: msg = 'A different object named {} already exists in {} in your kubeconfig file.' raise CLIError(msg.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) if context_name is not None: addition['contexts'][0]['name'] = context_name addition['contexts'][0]['context']['cluster'] = context_name addition['clusters'][0]['name'] = context_name addition['current-context'] = context_name # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError( 'failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format( 
stat.S_IMODE(os.lstat(existing_file).st_mode)) if not existing_file_perms.endswith('600'): logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.', existing_file, existing_file_perms) with open(existing_file, 'w+') as stream: yaml.safe_dump(existing, stream, default_flow_style=False) current_context = addition.get('current-context', 'UNKNOWN') msg = 'Merged "{}" as current context in {}'.format( current_context, existing_file) print(msg) def cloud_storage_account_service_factory(cli_ctx, kwargs): from azure.cli.core.profiles import ResourceType, get_sdk t_cloud_storage_account = get_sdk( cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount') account_name = kwargs.pop('account_name', None) account_key = kwargs.pop('account_key', None) sas_token = kwargs.pop('sas_token', None) kwargs.pop('connection_string', None) return t_cloud_storage_account(account_name, account_key, sas_token) def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name): from azure.mgmt.monitor import MonitorManagementClient diag_settings_client = get_mgmt_service_client( cli_ctx, MonitorManagementClient).diagnostic_settings subscription_id = get_subscription_id(cli_ctx) aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \ '/managedClusters/{2}'.format(subscription_id, resource_group_name, name) diag_settings = diag_settings_client.list(aks_resource_id) if diag_settings.value: return diag_settings.value[0].storage_account_id print("No diag settings specified") return None def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') nodes = subprocess.check_output( ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"], universal_newlines=True) logger.debug(nodes) node_lines = nodes.splitlines() ready_nodes = {} for node_line in node_lines: 
# NOTE(review): continuation of display_diagnostics_report() — the `for node_line`
# loop header is on the previous line.
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning(
                "Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False

    logger.debug('There are %s ready nodes in the cluster',
                 str(len(ready_nodes)))

    if not ready_nodes:
        logger.warning(
            'No nodes are ready in the current cluster. Diagnostics info might not be available.')

    network_config_array = []
    network_status_array = []
    apds_created = False

    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # wait until one AzureDiagnostic (apd) resource exists per ready node
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
                 "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)

            print("Got {} diagnostic results for {} ready nodes{}\r".format(
                len(apd_lines), len(ready_nodes), '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    # this node's diagnostics were already collected
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n", "aks-periscope",
                         "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s',
                                 node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n", "aks-periscope",
                         "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s',
                                 node_name, network_status)
                    if not network_config or not network_status:
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break
                    network_config_array += json.loads(
                        '[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(
                        network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)

    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")

    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")


def format_diag_status(diag_status):
    """Colorize each entry's "Status" in place (red on "Error:", green otherwise)."""
    for diag in diag_status:
        if diag["Status"]:
            if "Error:" in diag["Status"]:
                diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
            else:
                diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'

    return diag_status


def format_bright(msg):
    """Wrap *msg* in bright/bold terminal styling."""
    return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'


def format_hyperlink(the_link):
    """Style *the_link* as a bright blue terminal hyperlink."""
    return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'


def get_aks_custom_headers(aks_custom_headers=None):
    """Parse "k1=v1,k2=v2" into a dict; raise CLIError on a malformed pair."""
    headers = {}
    if aks_custom_headers is not None:
        if aks_custom_headers != "":
            for pair in aks_custom_headers.split(','):
                # NOTE(review): values containing '=' are rejected here — presumably
                # intentional strictness; confirm before relaxing to split('=', 1).
                parts = pair.split('=')
                if len(parts) != 2:
                    raise CLIError('custom headers format is incorrect')
                headers[parts[0]] = parts[1]
    return headers


def _put_managed_cluster_ensuring_permission(
    cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    client,
    subscription_id,
    resource_group_name,
    name,
# NOTE(review): continuation of the _put_managed_cluster_ensuring_permission()
# parameter list opened on the previous line.
    managed_cluster,
    monitoring_addon_enabled,
    ingress_appgw_addon_enabled,
    virtual_node_addon_enabled,
    need_grant_vnet_permission_to_cluster_identity,
    vnet_subnet_id,
    enable_managed_identity,
    attach_acr,
    headers,
    no_wait
):
    """Create/update the managed cluster, then perform any post-creation role assignments."""
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                          cluster.identity.principal_id, scope=vnet_subnet_id,
                                          resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)

    return cluster


def _is_msi_cluster(managed_cluster):
    """True when the cluster uses a system- or user-assigned managed identity."""
    return (managed_cluster and managed_cluster.identity and
            (managed_cluster.identity.type.casefold() == "systemassigned" or
             managed_cluster.identity.type.casefold() == "userassigned"))


def _get_kubelet_config(file_path):
    """Load a kubelet-config JSON file into a KubeletConfig model (CLIError on bad input)."""
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        raise CLIError(
            "Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = KubeletConfig()
    config_object.cpu_manager_policy = kubelet_config.get(
        "cpuManagerPolicy", None)
    config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
    config_object.cpu_cfs_quota_period = kubelet_config.get(
        "cpuCfsQuotaPeriod", None)
    config_object.image_gc_high_threshold = kubelet_config.get(
        "imageGcHighThreshold", None)
    config_object.image_gc_low_threshold = kubelet_config.get(
        "imageGcLowThreshold", None)
    config_object.topology_manager_policy = kubelet_config.get(
        "topologyManagerPolicy", None)
    config_object.allowed_unsafe_sysctls = kubelet_config.get(
        "allowedUnsafeSysctls", None)
    config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
    config_object.container_log_max_files = kubelet_config.get(
        "containerLogMaxFiles", None)
    config_object.container_log_max_size_mb = kubelet_config.get(
        "containerLogMaxSizeMB", None)
    config_object.pod_max_pids = kubelet_config.get(
        "podMaxPids", None)

    return config_object


def _get_linux_os_config(file_path):
    """Load a Linux OS-config JSON file into a LinuxOSConfig model (CLIError on bad input)."""
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        raise CLIError(
            "Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = LinuxOSConfig()
    config_object.transparent_huge_page_enabled = os_config.get(
        "transparentHugePageEnabled", None)
    config_object.transparent_huge_page_defrag = os_config.get(
        "transparentHugePageDefrag", None)
    config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)

    # sysctl settings
    # NOTE(review): a missing "sysctls" key yields None and fails the isinstance
    # check below, i.e. sysctls are effectively mandatory here — confirm intended.
    sysctls = os_config.get("sysctls", None)
    if not isinstance(sysctls, dict):
        raise CLIError(
            "Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object.sysctls = SysctlConfig()
    config_object.sysctls.net_core_somaxconn = sysctls.get(
        "netCoreSomaxconn", None)
    config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
        "netCoreNetdevMaxBacklog", None)
    config_object.sysctls.net_core_rmem_max = sysctls.get(
        "netCoreRmemMax", None)
    config_object.sysctls.net_core_wmem_max = sysctls.get(
        "netCoreWmemMax", None)
    config_object.sysctls.net_core_optmem_max = sysctls.get(
        "netCoreOptmemMax", None)
    config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
        "netIpv4TcpMaxSynBacklog", None)
    config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
        "netIpv4TcpMaxTwBuckets", None)
    config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
        "netIpv4TcpFinTimeout", None)
    config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
        "netIpv4TcpKeepaliveTime", None)
    config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
        "netIpv4TcpKeepaliveProbes", None)
    config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
        "netIpv4TcpkeepaliveIntvl", None)
    config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
        "netIpv4TcpRmem", None)
    config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
        "netIpv4TcpWmem", None)
    config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
        "netIpv4TcpTwReuse", None)
    config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
        "netIpv4IpLocalPortRange", None)
    config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
        "netIpv4NeighDefaultGcThresh1", None)
    config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
        "netIpv4NeighDefaultGcThresh2", None)
    config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
        "netIpv4NeighDefaultGcThresh3", None)
    config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
        "netNetfilterNfConntrackMax", None)
    config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None) config_object.sysctls.fs_inotify_max_user_watches = sysctls.get( "fsInotifyMaxUserWatches", None) config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None) config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None) config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None) config_object.sysctls.kernel_threads_max = sysctls.get( "kernelThreadsMax", None) config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None) config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None) config_object.sysctls.vm_vfs_cache_pressure = sysctls.get( "vmVfsCachePressure", None) return config_object def _get_http_proxy_config(file_path): if not os.path.isfile(file_path): raise CLIError("{} is not valid file, or not accessable.".format(file_path)) hp_config = get_file_json(file_path) if not isinstance(hp_config, dict): raise CLIError( "Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path)) config_object = ManagedClusterHTTPProxyConfig() config_object.http_proxy = hp_config.get("httpProxy", None) config_object.https_proxy = hp_config.get("httpsProxy", None) config_object.no_proxy = hp_config.get("noProxy", None) config_object.trusted_ca = hp_config.get("trustedCa", None) return config_object def _is_pod_identity_addon_enabled(instance): if not instance: return False if not instance.pod_identity_profile: return False return bool(instance.pod_identity_profile.enabled) def _ensure_pod_identity_addon_is_enabled(instance): if not _is_pod_identity_addon_enabled(instance): raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n' 'To enable, run "az aks update --enable-pod-identity') def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent): if not network_profile or not network_profile.network_plugin: # invalid data return if network_profile.network_plugin.lower() != 
'kubenet': # not kubenet, no need to check return if customer_consent is None: # no set this time, read from previous value customer_consent = bool( pod_identity_profile.allow_network_plugin_kubenet) if not customer_consent: raise CLIError( '--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin') pod_identity_profile.allow_network_plugin_kubenet = True def _fill_defaults_for_pod_identity_exceptions(pod_identity_exceptions): if not pod_identity_exceptions: return for exc in pod_identity_exceptions: if exc.pod_labels is None: # in previous version, we accidentally allowed user to specify empty pod labels, # which will be converted to `None` in response. This behavior will break the extension # when using 2021-09-01 version. As a workaround, we always back fill the empty dict value # before sending to the server side. exc.pod_labels = dict() def _fill_defaults_for_pod_identity_profile(pod_identity_profile): if not pod_identity_profile: return _fill_defaults_for_pod_identity_exceptions(pod_identity_profile.user_assigned_identity_exceptions) def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None): if not enable: # when disable, remove previous saved value instance.pod_identity_profile = ManagedClusterPodIdentityProfile( enabled=False) return _fill_defaults_for_pod_identity_exceptions(pod_identity_exceptions) if not instance.pod_identity_profile: # not set before instance.pod_identity_profile = ManagedClusterPodIdentityProfile( enabled=enable, user_assigned_identities=pod_identities, user_assigned_identity_exceptions=pod_identity_exceptions, ) _ensure_pod_identity_kubenet_consent( instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent) instance.pod_identity_profile.enabled = enable instance.pod_identity_profile.user_assigned_identities = pod_identities or [] 
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or [] def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope): cluster_identity_object_id = None if instance.identity.type.lower() == 'userassigned': for identity in instance.identity.user_assigned_identities.values(): cluster_identity_object_id = identity.principal_id break elif instance.identity.type.lower() == 'systemassigned': cluster_identity_object_id = instance.identity.principal_id else: raise CLIError('unsupported identity type: {}'.format( instance.identity.type)) if cluster_identity_object_id is None: raise CLIError('unable to resolve cluster identity') factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments cluster_identity_object_id = cluster_identity_object_id.lower() scope = scope.lower() # list all assignments of the target identity (scope) that assigned to the cluster identity filter_query = "atScope() and assignedTo('{}')".format(cluster_identity_object_id) for i in assignments_client.list_for_scope(scope=scope, filter=filter_query): if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID): continue # sanity checks to make sure we see the correct assignments if i.principal_id.lower() != cluster_identity_object_id: # assignedTo() should return the assignment to cluster identity continue if not scope.startswith(i.scope.lower()): # atScope() should return the assignments in subscription / resource group / resource level continue # already assigned logger.debug('Managed Identity Opereator role has been assigned to {}'.format(i.scope)) return if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id, is_service_principal=False, scope=scope): raise CLIError( 'Could not grant Managed Identity Operator permission for cluster') # need more time to propogate this assignment... 
print()
    print('Wait 30 seconds for identity role assignment propagation.')
    time.sleep(30)


def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
                         identity_name, identity_namespace, identity_resource_id,
                         binding_selector=None,
                         no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity (name + namespace + user-assigned identity) to a managed cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    user_assigned_identity = _get_user_assigned_identity(
        cmd.cli_ctx, identity_resource_id)
    # the cluster identity needs 'Managed Identity Operator' over the identity being added
    _ensure_managed_identity_operator_permission(
        cmd.cli_ctx, instance, user_assigned_identity.id)

    pod_identities = []
    if instance.pod_identity_profile.user_assigned_identities:
        pod_identities = instance.pod_identity_profile.user_assigned_identities
    pod_identity = ManagedClusterPodIdentity(
        name=identity_name,
        namespace=identity_namespace,
        identity=UserAssignedIdentity(
            resource_id=user_assigned_identity.id,
            client_id=user_assigned_identity.client_id,
            object_id=user_assigned_identity.principal_id,
        )
    )
    if binding_selector is not None:
        pod_identity.binding_selector = binding_selector
    pod_identities.append(pod_identity)

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)


def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
                            identity_name, identity_namespace,
                            no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity matching *identity_name*/*identity_namespace* from a managed cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    pod_identities = []
    if instance.pod_identity_profile.user_assigned_identities:
        for pod_identity in instance.pod_identity_profile.user_assigned_identities:
            if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
                # to remove
                continue
pod_identities.append(pod_identity)

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)


def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name):  # pylint: disable=unused-argument
    """Return the managed cluster (nulls stripped) so its pod identities can be listed."""
    instance = client.get(resource_group_name, cluster_name)
    return _remove_nulls([instance])[0]


def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
                                   exc_name, exc_namespace, pod_labels,
                                   no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity exception (name + namespace + pod labels) to a managed cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    pod_identity_exceptions = []
    if instance.pod_identity_profile.user_assigned_identity_exceptions:
        pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
    exc = ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    pod_identity_exceptions.append(exc)

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)


def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace,
                                      no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity exception matching *exc_name*/*exc_namespace* from a managed cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    pod_identity_exceptions = []
    if instance.pod_identity_profile.user_assigned_identity_exceptions:
        for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
            if exc.name
== exc_name and exc.namespace == exc_namespace:
                # to remove
                continue
            pod_identity_exceptions.append(exc)

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)


def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, pod_labels,
                                      no_wait=False):  # pylint: disable=unused-argument
    """Replace the pod labels of an existing pod identity exception.

    :raises CLIError: when no exception matches *exc_namespace*/*exc_name*.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    found_target = False
    updated_exc = ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    pod_identity_exceptions = []
    if instance.pod_identity_profile.user_assigned_identity_exceptions:
        for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
            if exc.name == exc_name and exc.namespace == exc_namespace:
                found_target = True
                pod_identity_exceptions.append(updated_exc)
            else:
                pod_identity_exceptions.append(exc)

    if not found_target:
        raise CLIError(
            'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))

    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)


def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
    """Return the managed cluster (nulls stripped) so its pod identity exceptions can be listed."""
    instance = client.get(resource_group_name, cluster_name)
    return _remove_nulls([instance])[0]


def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
    # Grant the cluster identity 'Managed Identity Operator' over the kubelet identity scope.
    factory =
get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments

    # scan existing assignments at exactly this scope for the operator role
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return

    if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
                               cluster_identity_object_id, is_service_principal=False, scope=scope):
        raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))


def aks_egress_endpoints_list(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """List the outbound network dependency endpoints of a managed cluster."""
    return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)


def aks_snapshot_create(cmd,    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
                        client,
                        resource_group_name,
                        name,
                        nodepool_id,
                        location=None,
                        tags=None,
                        aks_custom_headers=None,
                        no_wait=False):
    """Create a node pool snapshot; location defaults to the resource group's location."""
    rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    creationData = CreationData(
        source_resource_id=nodepool_id
    )

    snapshot = Snapshot(
        name=name,
        tags=tags,
        location=location,
        creation_data=creationData
    )

    headers = get_aks_custom_headers(aks_custom_headers)
    return client.create_or_update(resource_group_name, name, snapshot, headers=headers)


def aks_snapshot_show(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """Show a node pool snapshot."""
    snapshot = client.get(resource_group_name, name)
    return snapshot


def aks_snapshot_delete(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        no_wait=False,
                        yes=False):
    """Delete a node pool snapshot after interactive confirmation (skipped with --yes)."""
    from knack.prompting import prompt_y_n
    msg = 'This will delete the snapshot "{}" in resource group "{}", Are you sure?'.format(name, resource_group_name)
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    return
client.delete(resource_group_name, name)


def aks_snapshot_list(cmd, client, resource_group_name=None):  # pylint: disable=unused-argument
    """List node pool snapshots; subscription-wide when no resource group is given."""
    if resource_group_name is None or resource_group_name == '':
        return client.list()

    return client.list_by_resource_group(resource_group_name)
__init__.py
""" Enable ptvsd debugger to attach to HA. Attach ptvsd debugger by default to port 5678. """ import logging from threading import Thread from asyncio import Event import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_PORT import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType, HomeAssistantType DOMAIN = "ptvsd" CONF_WAIT = "wait" _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_HOST, default="0.0.0.0"): cv.string, vol.Optional(CONF_PORT, default=5678): cv.port, vol.Optional(CONF_WAIT, default=False): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistantType, config: ConfigType): """Set up ptvsd debugger.""" import ptvsd conf = config[DOMAIN] host = conf[CONF_HOST] port = conf[CONF_PORT] ptvsd.enable_attach((host, port)) wait = conf[CONF_WAIT] if wait: _LOGGER.warning("Waiting for ptvsd connection on %s:%s", host, port) ready = Event() def waitfor(): ptvsd.wait_for_attach() hass.loop.call_soon_threadsafe(ready.set) Thread(target=waitfor).start() await ready.wait() else: _LOGGER.warning("Listening for ptvsd connection on %s:%s", host, port) return True
udp_handler.py
import datetime
import sys
import signal
import threading

from elasticsearch_raven import transport
from elasticsearch_raven import utils


class Handler(object):
    """Receive Sentry messages over UDP and enqueue them onto pending_logs.

    Any exception raised while handling (including socket errors) closes the
    socket and is reported through *exception_handler* instead of propagating.
    """

    def __init__(self, sock, pending_logs, exception_handler, debug=False):
        self.sock = sock                          # bound UDP socket to read from
        self.pending_logs = pending_logs          # queue-like sink for parsed messages
        self.exception_handler = exception_handler  # callable invoked with the fatal exception
        self.debug = debug                        # when True, log each datagram's origin to stdout
        self.should_finish = False

    def as_thread(self):
        # Wrap handle() in a daemon thread; the caller is responsible for start().
        handler = threading.Thread(target=self.handle)
        handler.daemon = True
        return handler

    def handle(self):
        try:
            try:
                while True:
                    data, address = self.sock.recvfrom(65535)
                    # shield parse/enqueue of a received datagram from TERM/QUIT
                    with utils.ignore_signals([signal.SIGTERM, signal.SIGQUIT]):
                        message = transport.SentryMessage.create_from_udp(data)
                        self.pending_logs.put(message)
                        if self.debug:
                            sys.stdout.write('{host}:{port} [{date}]\n'.format(
                                host=address[0], port=address[1],
                                date=datetime.datetime.now()))
            finally:
                # always release the socket, whatever interrupted the loop
                self.sock.close()
        except Exception as e:
            self.exception_handler(e)
__init__.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright 2011-2020, Nigel Small # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from logging import getLogger from os import chmod, path from random import choice from shutil import rmtree from tempfile import mkdtemp from threading import Thread from time import sleep from docker import DockerClient from docker.errors import APIError, ImageNotFound from packaging.version import Version from py2neo.compat import perf_counter from py2neo.server.security import Auth, make_auth, install_certificate, install_private_key from py2neo.wiring import Address, Wire docker = DockerClient.from_env(version="auto") log = getLogger(__name__) def random_name(size): return "".join(choice("bcdfghjklmnpqrstvwxz") for _ in range(size)) class Neo4jInstance(object): """ A single Neo4j server instance, potentially part of a cluster. 
""" container = None ip_address = None ready = 0 def __init__(self, service, name, bolt_port=None, http_port=None, https_port=None): self.service = service self.name = name self.bolt_port = bolt_port self.http_port = http_port self.https_port = https_port self.address = self.addresses["bolt"] self.cert_volume_dir = None self.config = self._create_config(self.service) self.env = self._create_env(self.service) ports = {"7687/tcp": self.bolt_port, "7474/tcp": self.http_port} volumes = {} if self.service.secured: cert, key = self.service.cert_key_pair ports["7473/tcp"] = self.https_port self.cert_volume_dir = mkdtemp() chmod(self.cert_volume_dir, 0o755) log.debug("Using directory %r as shared certificate volume", self.cert_volume_dir) if self.service.image.version >= Version("4.0"): subdirectories = [path.join(self.cert_volume_dir, subdir) for subdir in ["bolt", "https"]] install_certificate(cert, "public.crt", *subdirectories) install_private_key(key, "private.key", *subdirectories) else: install_certificate(cert, "neo4j.cert", self.cert_volume_dir) install_private_key(key, "neo4j.key", self.cert_volume_dir) volumes[self.cert_volume_dir] = { "bind": "/var/lib/neo4j/certificates", "mode": "ro", } self.container = docker.containers.create( self.image.id, detach=True, environment=self.env, hostname=self.fq_name, name=self.fq_name, network=self.service.name, ports=ports, volumes=volumes, ) def __hash__(self): return hash(self.container) def __repr__(self): return "%s(fq_name={!r}, image={!r}, address={!r})".format( self.__class__.__name__, self.fq_name, self.image, self.address) def _create_config(self, service): config = { "dbms.backup.enabled": "false", "dbms.connector.bolt.advertised_address": "localhost:{}".format(self.bolt_port), "dbms.memory.heap.initial_size": "300m", "dbms.memory.heap.max_size": "500m", "dbms.memory.pagecache.size": "50m", "dbms.transaction.bookmark_ready_timeout": "5s", } # Security configuration if service.secured: if service.image.version >= 
Version("4.0"): config.update({ "dbms.ssl.policy.bolt.enabled": True, "dbms.ssl.policy.https.enabled": True, "dbms.connector.bolt.tls_level": "OPTIONAL", "dbms.connector.https.enabled": True, }) else: pass return config def _create_env(self, service): env = {} # Enterprise edition requires license agreement # TODO: make this externally explicit, somehow if service.image.edition == "enterprise": env["NEO4J_ACCEPT_LICENSE_AGREEMENT"] = "yes" # Add initial auth details if service.auth: env["NEO4J_AUTH"] = "/".join(service.auth) # Add config for key, value in self.config.items(): fixed_key = "NEO4J_" + key.replace("_", "__").replace(".", "_") env[fixed_key] = value return env @property def fq_name(self): return "{}.{}".format(self.name, self.service.name) @property def image(self): return self.service.image @property def addresses(self): addresses = { "bolt": Address(("localhost", self.bolt_port)), "http": Address(("localhost", self.http_port)), } if self.service.secured: addresses["bolt+s"] = Address(("localhost", self.bolt_port)) addresses["bolt+ssc"] = Address(("localhost", self.bolt_port)) addresses["https"] = Address(("localhost", self.https_port)) addresses["http+s"] = Address(("localhost", self.https_port)) addresses["http+ssc"] = Address(("localhost", self.https_port)) return addresses def start(self): log.info("Starting instance %r with image %r", self.fq_name, self.service.image) for scheme, address in self.addresses.items(): log.info(" at <%s://%s>", scheme, address) try: self.container.start() self.container.reload() self.ip_address = (self.container.attrs["NetworkSettings"] ["Networks"][self.service.name]["IPAddress"]) except APIError as e: log.exception(e) log.debug("Machine %r is bound to internal IP address %s", self.fq_name, self.ip_address) def _poll_bolt_address(self, count=240, interval=0.5, is_running=None): address = self.addresses["bolt"] t0 = perf_counter() for _ in range(count): if callable(is_running) and not is_running(): break wire = None 
try:
                wire = Wire.open(address, keep_alive=True)
                # Bolt handshake: magic preamble plus four proposed protocol versions
                wire.write(b"\x60\x60\xB0\x17"
                           b"\x00\x00\x01\x04"
                           b"\x00\x00\x00\x04"
                           b"\x00\x00\x00\x03"
                           b"\x00\x00\x00\x02")
                wire.send()
                data = wire.read(4)
            except OSError:
                sleep(interval)
            else:
                t1 = perf_counter() - t0
                log.info("Machine {!r} available "
                         "for Bolt traffic "
                         "after {:.02f}s".format(self.fq_name, t1))
                return True
            finally:
                if wire:
                    wire.close()
        return False

    def _poll_http_address(self, count=240, interval=0.5, is_running=None):
        # Poll the HTTP port until a "GET /" round-trip succeeds or attempts run out.
        address = self.addresses["http"]
        t0 = perf_counter()
        for _ in range(count):
            if callable(is_running) and not is_running():
                break
            wire = None
            try:
                wire = Wire.open(address, keep_alive=True)
                wire.write("GET / HTTP/1.1\r\n"
                           "Host: {}\r\n\r\n".format(address.host).encode("ASCII"))
                wire.send()
                data = wire.read(4)
            except OSError:
                sleep(interval)
            else:
                t1 = perf_counter() - t0
                log.info("Machine {!r} available "
                         "for HTTP traffic "
                         "after {:.02f}s".format(self.fq_name, t1))
                return True
            finally:
                if wire:
                    wire.close()
        return False

    def await_started(self):
        """Block until the instance serves Bolt and HTTP; set `ready` to 1 on
        success or -1 when the container exited during startup."""
        sleep(1)

        def is_running():
            self.container.reload()
            return self.container.status == "running"

        if is_running():
            result = {}

            # poll both endpoints concurrently
            def poll_bolt_address():
                result["bolt"] = self._poll_bolt_address(is_running=is_running)

            def poll_http_address():
                result["http"] = self._poll_http_address(is_running=is_running)

            threads = [
                Thread(target=poll_bolt_address),
                Thread(target=poll_http_address),
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

            if any(value is False for value in result.values()):
                self.container.reload()
                state = self.container.attrs["State"]
                if state["Status"] == "exited":
                    self.ready = -1
                    log.error("Machine %r exited with code %r",
                              self.fq_name, state["ExitCode"])
                    for line in self.container.logs().splitlines():
                        log.error("> %s" % line.decode("utf-8"))
                else:
                    log.error("Machine %r did not "
                              "become available", self.fq_name)
            else:
                self.ready = 1
        else:
            log.error("Machine %r is not running (status=%r)",
                      self.fq_name, self.container.status)
            for line in
self.container.logs().splitlines():
                log.error("> %s" % line.decode("utf-8"))

    def stop(self):
        """Stop and remove the container, cleaning up any certificate volume."""
        log.info("Stopping instance %r", self.fq_name)
        self.container.stop()
        self.container.remove(force=True)
        if self.cert_volume_dir:
            log.debug("Removing directory %r", self.cert_volume_dir)
            rmtree(self.cert_volume_dir)


class Neo4jService(object):
    """ A Neo4j database management service.
    """

    default_bolt_port = 7687
    default_http_port = 7474
    default_https_port = 7473

    @classmethod
    def single_instance(cls, name, image_tag, auth, cert_key_pair=None):
        """Alternate constructor: a service with exactly one instance, named 'a'."""
        service = cls(name, image_tag, auth, cert_key_pair)
        ports = {
            "bolt_port": 7687,
            "http_port": 7474,
        }
        if service.secured:
            ports["https_port"] = 7473
        service.instances.append(Neo4jInstance(service, "a", **ports))
        return service

    def __init__(self, name, image_tag, auth, cert_key_pair):
        self.name = name or random_name(7)
        self.image = Neo4jImage(image_tag)
        self.auth = Auth(*auth) if auth else make_auth()
        if self.auth.user != "neo4j":
            raise ValueError("Auth user must be 'neo4j' or empty")
        self.cert_key_pair = cert_key_pair
        self.instances = []
        self.network = None
        self.console = None

    def __enter__(self):
        try:
            self.start()
        except KeyboardInterrupt:
            # tear down partially-started containers on Ctrl-C
            self.stop()
            raise
        else:
            return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def _get_instance_by_address(self, address):
        # Returns None when no instance listens on the given Bolt address.
        address = Address((address.host, address.port_number))
        for instance in self.instances:
            if instance.addresses["bolt"] == address:
                return instance

    def _for_each_instance(self, f):
        # NOTE(review): Thread(target=f(instance)) CALLS f immediately. Callers
        # that pass a lambda returning a bound method (see start()) get true
        # parallelism, but callers whose f performs the work and returns None run
        # serially and the spawned thread is a no-op. Looks like a latent bug --
        # confirm intent before changing, as start() depends on the current form.
        threads = []
        for instance in self.instances:
            thread = Thread(target=f(instance))
            thread.daemon = True
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()

    @property
    def secured(self):
        # secured only when a real cert/key pair was supplied
        if self.cert_key_pair is None or self.cert_key_pair == (None, None):
            return False
        else:
            return True

    def start(self):
        """Create the Docker network, start every instance, then wait for readiness."""
        self.network = docker.networks.create(self.name)
        self._for_each_instance(lambda instance: instance.start)
        self.await_started()

    def await_started(self):
        """Wait for all instances; raise RuntimeError when any failed to start."""

        def wait(instance):
instance.await_started()

        self._for_each_instance(wait)
        if all(instance.ready == 1 for instance in self.instances):
            log.info("Neo4j %s %s service %r available",
                     self.image.edition.title(), self.image.version, self.name)
        else:
            raise RuntimeError("Service %r unavailable - "
                               "some instances failed" % self.name)

    def stop(self):
        """Stop every instance, then remove the Docker network."""
        log.info("Stopping service %r", self.name)

        def _stop(instance):
            instance.stop()

        self._for_each_instance(_stop)
        if self.network:
            self.network.remove()

    @classmethod
    def find_and_stop(cls, service_name):
        """Stop and remove all containers (and the network) belonging to a named service."""
        for container in docker.containers.list(all=True):
            if container.name.endswith(".{}".format(service_name)):
                container.stop()
                container.remove(force=True)
        docker.networks.get(service_name).remove()

    def env(self):
        """Environment variables describing this service for client processes."""
        addresses = [instance.address for instance in self.instances]
        auth = "{}:{}".format(self.auth.user, self.auth.password)
        return {
            "BOLT_SERVER_ADDR": " ".join(map(str, addresses)),
            "NEO4J_AUTH": auth,
        }


class Neo4jImage(object):

    def __init__(self, tag="latest"):
        self._image_tag = self._resolve_image_tag(tag)
        try:
            self._image = docker.images.get(self._image_tag)
        except ImageNotFound:
            # pull on demand when the image is not available locally
            log.info("Downloading Docker image %r", self._image_tag)
            self._image = docker.images.pull(self._image_tag)

    def __repr__(self):
        return "Neo4jImage(tag=%r)" % self._image_tag

    @property
    def id(self):
        return self._image.id

    @property
    def tarball(self):
        """ Name of the Neo4j tarball used to build the Docker image
        used by this service.
        """
        for item in self._image.attrs["Config"]["Env"]:
            name, _, value = item.partition("=")
            if name == "NEO4J_TARBALL":
                return value

    @property
    def edition(self):
        """ Edition of Neo4j used to build the Docker image used by
        this service.
        """
        _, edition, _, _ = self.tarball.split("-")
        return edition

    @property
    def version(self):
        """ Version of Neo4j used to build the Docker image used by
        this service.
""" _, _, version, _ = self.tarball.split("-") return Version(version) @classmethod def _resolve_image_tag(cls, tag): """ Resolve an informal image tag into a full Docker image tag. Any tag available on Docker Hub for Neo4j can be used, and if no 'neo4j:' prefix exists, this will be added automatically. The default edition is Community, unless a cluster is being created in which case Enterprise edition is selected instead. Explicit selection of Enterprise edition can be made by adding an '-enterprise' suffix to the image tag. If a 'file:' URI is passed in here instead of an image tag, the Docker image will be loaded from that file instead. Examples of valid tags: - 3.4.6 - neo4j:3.4.6 - latest - file:/home/me/image.tar """ resolved = tag if resolved.startswith("file:"): return cls._load_image_from_file(resolved[5:]) if ":" not in resolved: resolved = "neo4j:" + tag return resolved @classmethod def _load_image_from_file(cls, name): with open(name, "rb") as f: images = docker.images.load(f.read()) image = images[0] return image.tags[0]
datasets.py
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from ..utils.utils import xyxy2xywh, xywh2xyxy

help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']

# Get orientation exif tag
# (the loop leaves `orientation` bound to the numeric EXIF tag id for 'Orientation')
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:  # images without EXIF raise here (e.g. AttributeError/KeyError); fall back to raw size
        pass

    return s


def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
    """Build a LoadImagesAndLabels dataset plus a DataLoader over it."""
    dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                  augment=augment,  # augment images
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=rect,  # rectangular training
                                  cache_images=cache,
                                  single_cls=opt.single_cls,
                                  stride=stride,
                                  pad=pad)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=LoadImagesAndLabels.collate_fn)
    return dataloader, dataset


class LoadImages:  # for inference
    def __init__(self, img, img_size=640):
        # NOTE(review): this variant appears to take an in-memory image array rather
        # than a path; the original path/video handling is kept below, commented out.
        # path = str(Path(path))  # os-agnostic
        # files = []
        # if os.path.isdir(path):
        #     files = sorted(glob.glob(os.path.join(path, '*.*')))
        # elif os.path.isfile(path):
        #     files = [path]

        # images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        # videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        # nI, nV =
len(images), len(videos)

        self.img_size = img_size
        self.files = img
        # self.nF = nI + nV  # number of files
        # self.video_flag = [False] * nI + [True] * nV
        self.mode = 'images'
        # if any(videos):
        #     self.new_video(videos[0])  # new video
        # else:
        self.cap = None
        # assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
        #                     (path, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        # if self.count == self.nF:
        #     raise StopIteration
        path = self.files

        # if self.video_flag[self.count]:
        #     # Read video
        #     self.mode = 'video'
        #     ret_val, img0 = self.cap.read()
        #     if not ret_val:
        #         self.count += 1
        #         self.cap.release()
        #         if self.count == self.nF:  # last video
        #             raise StopIteration
        #         else:
        #             path = self.files[self.count]
        #             self.new_video(path)
        #             ret_val, img0 = self.cap.read()
        #     self.frame += 1
        #     print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
        # else:
        # Read image
        self.count += 1
        img0 = path  # BGR
        assert img0 is not None, 'Image Not Found ' + path
        # print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files


class LoadWebcam:  # for inference
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        #
pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/ # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/ # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer self.pipe = pipe self.cap = cv2.VideoCapture(pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if cv2.waitKey(1) == ord('q'): # q to quit self.cap.release() cv2.destroyAllWindows() raise StopIteration # Read frame if self.pipe == 0: # local camera ret_val, img0 = self.cap.read() img0 = cv2.flip(img0, 1) # flip left-right else: # IP camera n = 0 while True: n += 1 self.cap.grab() if n % 30 == 0: # skip frames ret_val, img0 = self.cap.retrieve() if ret_val: break # Print assert ret_val, 'Camera Error %s' % self.pipe img_path = 'webcam.jpg' print('webcam %g: ' % self.count, end='') # Padded resize img = letterbox(img0, new_shape=self.img_size)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return img_path, img, img0, None def __len__(self): return 0 class LoadStreams: # multiple IP or RTSP cameras def __init__(self, sources='streams.txt', img_size=640): self.mode = 'images' self.img_size = img_size if os.path.isfile(sources): with open(sources, 'r') as f: sources = [x.strip() for x in f.read().splitlines() if len(x.strip())] else: sources = [sources] n = len(sources) self.imgs 
= [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years


class LoadImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0):
        try:
            path =
str(Path(path)) # os-agnostic parent = str(Path(path).parent) + os.sep if os.path.isfile(path): # file with open(path, 'r') as f: f = f.read().splitlines() f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path elif os.path.isdir(path): # folder f = glob.iglob(path + os.sep + '*.*') else: raise Exception('%s does not exist' % path) self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats] except: raise Exception('Error loading data from %s. See %s' % (path, help_url)) n = len(self.img_files) assert n > 0, 'No images found in %s. See %s' % (path, help_url) bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches self.n = n # number of images self.batch = bi # batch index of image self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride # Define labels self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files] # Read image shapes (wh) sp = path.replace('.txt', '') + '.shapes' # shapefile path try: with open(sp, 'r') as f: # read existing shapefile s = [x.split() for x in f.read().splitlines()] assert len(s) == n, 'Shapefile out of sync' except: s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')] np.savetxt(sp, s, fmt='%g') # overwrites existing (if any) self.shapes = np.array(s, dtype=np.float64) # Rectangular Training https://github.com/ultralytics/yolov3/issues/232 if self.rect: # Sort by aspect ratio s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() self.img_files = [self.img_files[i] for i in irect] self.label_files = [self.label_files[i] for i in 
irect] self.shapes = s[irect] # wh ar = ar[irect] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: shapes[i] = [maxi, 1] elif mini > 1: shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache labels self.imgs = [None] * n self.labels = [np.zeros((0, 5), dtype=np.float32)] * n create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file if os.path.isfile(np_labels_path): s = np_labels_path # print string x = np.load(np_labels_path, allow_pickle=True) if len(x) == n: self.labels = x labels_loaded = True else: s = path.replace('images', 'labels') pbar = tqdm(self.label_files) for i, file in enumerate(pbar): if labels_loaded: l = self.labels[i] # np.savetxt(file, l, '%g') # save *.txt from *.npy file else: try: with open(file, 'r') as f: l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) except: nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing continue if l.shape[0]: assert l.shape[1] == 5, '> 5 label columns: %s' % file assert (l >= 0).all(), 'negative labels: %s' % file assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows if single_cls: l[:, 0] = 0 # force dataset into single-class mode self.labels[i] = l nf += 1 # file found # Create subdataset (a smaller dataset) if create_datasubset and ns < 1E4: if ns == 0: create_folder(path='./datasubset') os.makedirs('./datasubset/images') exclude_classes = 43 if exclude_classes not in l[:, 0]: ns += 1 # 
shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image with open('./datasubset/images.txt', 'a') as f: f.write(self.img_files[i] + '\n') # Extract object detection boxes for a second stage classifier if extract_bounding_boxes: p = Path(self.img_files[i]) img = cv2.imread(str(p)) h, w = img.shape[:2] for j, x in enumerate(l): f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name) if not os.path.exists(Path(f).parent): os.makedirs(Path(f).parent) # make new output folder b = x[1:] * [w, h, w, h] # box b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.3 + 30 # pad b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes' else: ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % ( s, nf, nm, ne, nd, n) assert nf > 0 or n == 20288, 'No labels found in %s. 
See %s' % (os.path.dirname(file) + os.sep, help_url) if not labels_loaded and n > 1000: print('Saving labels to %s for faster future loading' % np_labels_path) np.save(np_labels_path, self.labels) # save for next time # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) if cache_images: # if training gb = 0 # Gigabytes of cached images pbar = tqdm(range(len(self.img_files)), desc='Caching images') self.img_hw0, self.img_hw = [None] * n, [None] * n for i in pbar: # max 10k images self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized gb += self.imgs[i].nbytes pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3 detect_corrupted_images = False if detect_corrupted_images: from skimage import io # conda install -c conda-forge scikit-image for file in tqdm(self.img_files, desc='Detecting corrupted images'): try: _ = io.imread(file) except: print('Corrupted image detected: %s' % file) def __len__(self): return len(self.img_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): if self.image_weights: index = self.indices[index] hyp = self.hyp if self.mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling # Load labels labels = [] x = self.labels[index] if x.size > 0: # Normalized xywh to pixel xyxy format labels = x.copy() labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # 
pad width labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] if self.augment: # Augment imagespace if not self.mosaic: img, labels = random_affine(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: # convert xyxy to xywh labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # Normalize coordinates 0 - 1 labels[:, [2, 4]] /= img.shape[0] # height labels[:, [1, 3]] /= img.shape[1] # width if self.augment: # random left-right flip lr_flip = True if lr_flip and random.random() < 0.5: img = np.fliplr(img) if nL: labels[:, 1] = 1 - labels[:, 1] # random up-down flip ud_flip = False if ud_flip and random.random() < 0.5: img = np.flipud(img) if nL: labels[:, 2] = 1 - labels[:, 2] labels_out = torch.zeros((nL, 6)) if nL: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes @staticmethod def collate_fn(batch): img, label, path, shapes = zip(*batch) # transposed for i, l in enumerate(label): l[:, 0] = i # add target image index for build_targets() return torch.stack(img, 0), torch.cat(label, 0), path, shapes def load_image(self, index): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached path = self.img_files[index] img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path h0, w0 = img.shape[:2] # orig hw r = self.img_size / max(h0, w0) # resize image to img_size if r != 1: # always resize down, only resize up if 
training with augmentation interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 x = np.arange(0, 256, dtype=np.int16) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed # Histogram equalization # if random.random() < 0.2: # for i in range(3): # img[:, :, i] = cv2.equalizeHist(img[:, :, i]) def load_mosaic(self, index): # loads images in a mosaic labels4 = [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) 
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b # Labels x = self.labels[index] labels = x.copy() if x.size > 0: # Normalized xywh to pixel xyxy format labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh labels4.append(labels) # Concat/clip labels if len(labels4): labels4 = np.concatenate(labels4, 0) # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine # Replicate # img4, labels4 = replicate(img4, labels4) # Augment # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning) img4, labels4 = random_affine(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], border=self.mosaic_border) # border to remove return img4, labels4 def replicate(img, labels): # Replicate labels h, w = img.shape[:2] boxes = labels[:, 1:].astype(int) x1, y1, x2, y2 = boxes.T s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices x1b, y1b, x2b, y2b = boxes[i] bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) return img, labels def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, 
scaleup=True): # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 shape = img.shape[:2] # current shape [height, width] if isinstance(new_shape, int): new_shape = (new_shape, new_shape) # Scale ratio (new / old) r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) if not scaleup: # only scale down, do not scale up (for better test mAP) r = min(r, 1.0) # Compute padding ratio = r, r # width, height ratios new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding if auto: # minimum rectangle dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding elif scaleFill: # stretch dw, dh = 0.0, 0.0 new_unpad = new_shape ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios dw /= 2 # divide padding into 2 sides dh /= 2 if shape[::-1] != new_unpad: # resize img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border return img, ratio, (dw, dh) def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4 # targets = [cls, xyxy] height = img.shape[0] + border[0] * 2 # shape(h,w,c) width = img.shape[1] + border[1] * 2 # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s) # Translation T = np.eye(3) T[0, 
2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels) T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Combined rotation matrix M = S @ T @ R # ORDER IS IMPORTANT HERE!! if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114)) # Transform label coordinates n = len(targets) if n: # warp points xy = np.ones((n * 4, 3)) xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 xy = (xy @ M.T)[:, :2].reshape(n, 8) # create new boxes x = xy[:, [0, 2, 4, 6]] y = xy[:, [1, 3, 5, 7]] xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # # apply angle-based reduction of bounding boxes # radians = a * math.pi / 180 # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 # x = (xy[:, 2] + xy[:, 0]) / 2 # y = (xy[:, 3] + xy[:, 1]) / 2 # w = (xy[:, 2] - xy[:, 0]) * reduction # h = (xy[:, 3] - xy[:, 1]) * reduction # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T # reject warped points outside of image xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) w = xy[:, 2] - xy[:, 0] h = xy[:, 3] - xy[:, 1] area = w * h area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2]) ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20) targets = targets[i] targets[:, 1:5] = xy[i] return img, targets def cutout(image, labels): # https://arxiv.org/abs/1708.04552 # https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py # 
https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509 h, w = image.shape[:2] def bbox_ioa(box1, box2): # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 box2 = box2.transpose() # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 # Intersection over box2 area return inter_area / box2_area # create random masks scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: mask_h = random.randint(1, int(h * s)) mask_w = random.randint(1, int(w * s)) # box xmin = max(0, random.randint(0, w) - mask_w // 2) ymin = max(0, random.randint(0, h) - mask_h // 2) xmax = min(w, xmin + mask_w) ymax = min(h, ymin + mask_h) # apply random color mask image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size() # creates a new ./images_reduced folder with reduced size images of maximum size img_size path_new = path + '_reduced' # reduced images path create_folder(path_new) for f in tqdm(glob.glob('%s/*.*' % path)): try: img = cv2.imread(f) h, w = img.shape[:2] r = img_size / max(h, w) # size ratio if r < 1.0: img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest 
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg') cv2.imwrite(fnew, img) except: print('WARNING: image failure %s' % f) def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp() # Save images formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats] # for path in ['../coco/images/val2014', '../coco/images/train2014']: for path in ['../data/sm4/images', '../data/sm4/background']: create_folder(path + 'bmp') for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng'] for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext): cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f)) # Save labels # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']: for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']: with open(file, 'r') as f: lines = f.read() # lines = f.read().replace('2014/', '2014bmp/') # coco lines = lines.replace('/images', '/imagesbmp') lines = lines.replace('/background', '/backgroundbmp') for ext in formats: lines = lines.replace(ext, '.bmp') with open(file.replace('.txt', 'bmp.txt'), 'w') as f: f.write(lines) def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp() # Converts dataset to bmp (for faster training) formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats] for a, b, files in os.walk(dataset): for file in tqdm(files, desc=a): p = a + '/' + file s = Path(file).suffix if s == '.txt': # replace text with open(p, 'r') as f: lines = f.read() for f in formats: lines = lines.replace(f, '.bmp') with open(p, 'w') as f: f.write(lines) elif s in formats: # replace image cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p)) if s != '.bmp': os.system("rm '%s'" % p) def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder() # Copies all the images in a text file (list of images) into a folder 
create_folder(path[:-4]) with open(path, 'r') as f: for line in f.read().splitlines(): os.system('cp "%s" %s' % (line, path[:-4])) print(line) def create_folder(path='./new_folder'): # Create folder if os.path.exists(path): shutil.rmtree(path) # delete output folder os.makedirs(path) # make new output folder
test_MMTransE_lan_mapping_cn.py
import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), '../../src/MMTransE')) from MMTransE import MMTransE import time import multiprocessing from multiprocessing import Process, Value, Lock, Manager, Array import numpy as np from numpy import linalg as LA fmap = os.path.join(os.path.dirname(__file__), '../../data/CN3l/en_de/en2de_cn.csv') fmap2 = os.path.join(os.path.dirname(__file__), '../../data/CN3l/en_de/de2en_cn.csv') fmodel = os.path.join(os.path.dirname(__file__), '../../models/en_de/model_MMtransE_cn_ed.bin') ofile1 = os.path.join(os.path.dirname(__file__), '../../results/C_test_en2de_score_MM.txt') ofile4 = os.path.join(os.path.dirname(__file__), '../../results/C_test_de2en_score_MM.txt') ef_map = {} fe_map = {} vocab_e = [] vocab_f = [] topK = 10 model = MMTransE() model.load(fmodel) def seem_hit(x, y): for i in y: if x.find(i) > -1 or i.find(x) > -1: return True return False for line in open(fmap): line = line.rstrip('\n').split('@@@') if len(line) != 2: continue vocab_e.append(line[0]) if ef_map.get(line[0]) == None: ef_map[line[0]] = [line[1]] else: ef_map[line[0]].append(line[1]) for line in open(fmap2): line = line.rstrip('\n').split('@@@') if len(line) != 2: continue vocab_f.append(line[0]) if fe_map.get(line[1]) == None: fe_map[line[1]] = [line[0]] else: fe_map[line[1]].append(line[0]) print "Loaded en_de de_en mappings." #en:... 
manager = Manager() lock1 = Lock() past_num = Value('i', 0, lock=True) score = manager.list()#store hit @ k rank = Value('d', 0.0, lock=True) rank_num = Value('i', 0, lock=True) cpu_count = multiprocessing.cpu_count() t0 = time.time() def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num): while index.value < len(vocab): id = index.value index.value += 1 word = vocab[id] if id % 100 == 0: print id ,'/', len(vocab), ' time used ',time.time() - t0 print score print rank.value tgt = map.get(word) cand = model.kNN_entity_name(word, src_lan, tgt_lan, topK) cand = [x[0] for x in cand] tmp_score = np.zeros(topK) hit = False last_i = 0 cur_rank = None if tgt == None: continue for i in range(len(cand)): last_i = i tmp_cand = cand[i] if hit == False and (seem_hit(tmp_cand, tgt) == True):# or tmp_cand == word): hit = True if hit == True: tmp_score[i] = 1.0 if cur_rank == None: cur_rank = i while last_i < topK: if hit: tmp_score[last_i] = 1.0 last_i += 1 if len(score) == 0: score.append(tmp_score) else: with lock1: score[0] = (score[0] * past_num.value + tmp_score) / (past_num.value + 1.0) past_num.value += 1 if cur_rank != None: rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1) rank_num.value += 1 continue tmp_dist = 2 vec_t = None vec_s = model.entity_transfer_vec(word, src_lan, tgt_lan) for tmp_vec in tgt: tmp_vec_t = model.entity_vec(tmp_vec, tgt_lan) if tmp_vec_t is None: continue cur_dist = LA.norm(tmp_vec_t - vec_s) if cur_dist < tmp_dist: tmp_dist = cur_dist vec_t = tmp_vec_t if vec_t is None: continue cur_rank = model.entity_rank(vec_s, vec_t, tgt_lan) rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1) rank_num.value += 1 index = Value('i',0,lock=True) processes = [Process(target=test, args=(model, vocab_e, index, 'en', 'de', ef_map, score, past_num)) for x in range(cpu_count - 1)] for p in processes: p.start() for p in processes: p.join() with open(ofile1, 'w') as fp: fp.write(str(rank.value) + '\n') 
for s in score[0]: fp.write(str(s) + '\t') print 'Finished testing en to de' #de:... manager = Manager() past_num = Value('i', 0, lock=True) score = manager.list()#store hit @ k rank = Value('d', 0.0, lock=True) rank_num = Value('i', 0, lock=True) index = Value('i',0,lock=True) processes = [Process(target=test, args=(model, vocab_f, index, 'de', 'en', fe_map, score, past_num)) for x in range(cpu_count - 1)] for p in processes: p.start() for p in processes: p.join() with open(ofile4, 'w') as fp: fp.write(str(rank.value) + '\n') for s in score[0]: fp.write(str(s) + '\t') print 'Finished testing de to en'
dispatcher.py
"""GRPC client. Implements loading and execution of Python workers. """ import asyncio import concurrent.futures import logging import queue import threading import traceback import grpc from . import functions from . import loader from . import protos from . import rpc_types class DispatcherMeta(type): __current_dispatcher__ = None @property def current(mcls): disp = mcls.__current_dispatcher__ if disp is None: raise RuntimeError('no currently running Dispatcher is found') return disp class Dispatcher(metaclass=DispatcherMeta): _GRPC_STOP_RESPONSE = object() def __init__(self, loop, host, port, worker_id, request_id, grpc_connect_timeout): self._loop = loop self._host = host self._port = port self._request_id = request_id self._worker_id = worker_id self._functions = functions.Registry() # A thread-pool for synchronous function calls. We limit # the number of threads to 1 so that one Python worker can # only run one synchronous function in parallel. This is # because synchronous code in Python is rarely designed with # concurrency in mind, so we don't want to allow users to # have races in their synchronous functions. Moreover, # because of the GIL in CPython, it rarely makes sense to # use threads (unless the code is IO bound, but we have # async support for that.) 
self._sync_call_tp = concurrent.futures.ThreadPoolExecutor( max_workers=1) self._grpc_connect_timeout = grpc_connect_timeout self._grpc_resp_queue: queue.Queue = queue.Queue() self._grpc_connected_fut = loop.create_future() self._grpc_thread = threading.Thread( name='grpc-thread', target=self.__poll_grpc) self._logger = logging.getLogger('python-azure-worker') @classmethod async def connect(cls, host, port, worker_id, request_id, connect_timeout): loop = asyncio._get_running_loop() disp = cls(loop, host, port, worker_id, request_id, connect_timeout) disp._grpc_thread.start() await disp._grpc_connected_fut return disp async def dispatch_forever(self): if DispatcherMeta.__current_dispatcher__ is not None: raise RuntimeError( 'there can be only one running dispatcher per process') self._old_task_factory = self._loop.get_task_factory() loader.install() DispatcherMeta.__current_dispatcher__ = self try: forever = self._loop.create_future() self._grpc_resp_queue.put_nowait( protos.StreamingMessage( request_id=self.request_id, start_stream=protos.StartStream( worker_id=self.worker_id))) self._loop.set_task_factory( lambda loop, coro: ContextEnabledTask(coro, loop=loop)) logging_handler = AsyncLoggingHandler() root_logger = logging.getLogger() root_logger.addHandler(logging_handler) try: await forever finally: root_logger.removeHandler(logging_handler) finally: DispatcherMeta.__current_dispatcher__ = None loader.uninstall() self._loop.set_task_factory(self._old_task_factory) self.stop() def stop(self): if self._grpc_thread is not None: self._grpc_resp_queue.put_nowait(self._GRPC_STOP_RESPONSE) self._grpc_thread.join() self._grpc_thread = None if self._sync_call_tp is not None: self._sync_call_tp.shutdown() self._sync_call_tp = None def _on_logging(self, record: logging.LogRecord): if record.levelno >= logging.CRITICAL: log_level = protos.RpcLog.Critical elif record.levelno >= logging.ERROR: log_level = protos.RpcLog.Error elif record.levelno >= logging.WARNING: log_level = 
protos.RpcLog.Warning elif record.levelno >= logging.INFO: log_level = protos.RpcLog.Info elif record.levelno >= logging.DEBUG: log_level = protos.RpcLog.Debug else: log_level = getattr(protos.RpcLog, 'None') log = dict( level=log_level, message=record.msg, category=record.name, ) invocation_id = get_current_invocation_id() if invocation_id is not None: log['invocation_id'] = invocation_id # XXX: When an exception field is set in RpcLog, WebHost doesn't # wait for the call result and simply aborts the execution. # # if record.exc_info and record.exc_info[1] is not None: # log['exception'] = self._serialize_exception(record.exc_info[1]) self._grpc_resp_queue.put_nowait( protos.StreamingMessage( request_id=self.request_id, rpc_log=protos.RpcLog(**log))) @property def request_id(self): return self._request_id @property def worker_id(self): return self._worker_id def _serialize_exception(self, exc): return protos.RpcException( message=f'{type(exc).__name__}: {exc.args[0]}', stack_trace=''.join(traceback.format_tb(exc.__traceback__))) async def _dispatch_grpc_request(self, request): content_type = request.WhichOneof('content') request_handler = getattr(self, f'_handle__{content_type}', None) if request_handler is None: # Don't crash on unknown messages. Some of them can be ignored; # and if something goes really wrong the host can always just # kill the worker's process. 
self._logger.error( f'unknown StreamingMessage content type {content_type}') return resp = await request_handler(request) self._grpc_resp_queue.put_nowait(resp) async def _handle__worker_init_request(self, req): return protos.StreamingMessage( request_id=self.request_id, worker_init_response=protos.WorkerInitResponse( result=protos.StatusResult( status=protos.StatusResult.Success))) async def _handle__function_load_request(self, req): func_request = req.function_load_request function_id = func_request.function_id try: func = loader.load_function( func_request.metadata.name, func_request.metadata.directory, func_request.metadata.script_file) self._functions.add_function( function_id, func, func_request.metadata) return protos.StreamingMessage( request_id=self.request_id, function_load_response=protos.FunctionLoadResponse( function_id=function_id, result=protos.StatusResult( status=protos.StatusResult.Success))) except Exception as ex: return protos.StreamingMessage( request_id=self.request_id, function_load_response=protos.FunctionLoadResponse( function_id=function_id, result=protos.StatusResult( status=protos.StatusResult.Failure, exception=self._serialize_exception(ex)))) async def _handle__invocation_request(self, req): invoc_request = req.invocation_request invocation_id = invoc_request.invocation_id function_id = invoc_request.function_id # Set the current `invocation_id` to the current task so # that our logging handler can find it. 
current_task = asyncio.Task.current_task(self._loop) assert isinstance(current_task, ContextEnabledTask) current_task.set_azure_invocation_id(invocation_id) try: fi: functions.FunctionInfo = self._functions.get(function_id) params = {} for pb in invoc_request.input_data: params[pb.name] = rpc_types.from_incoming_proto(pb.data) if fi.requires_context: params['context'] = rpc_types.Context( fi.name, fi.directory, invocation_id) if fi.output_types: for name in fi.output_types: params[name] = rpc_types.Out() if fi.is_async: call_result = await fi.func(**params) else: call_result = await self._loop.run_in_executor( self._sync_call_tp, self.__run_sync_func, invocation_id, fi.func, params) output_data = [] if fi.output_types: for out_name, out_type in fi.output_types.items(): val = params[name].get() if val is None: # TODO: is the "Out" parameter optional? # Can "None" be marshaled into protos.TypedData? continue rpc_val = rpc_types.to_outgoing_proto(out_type, val) assert rpc_val is not None output_data.append( protos.ParameterBinding( name=name, data=rpc_val)) return_value = None if fi.return_type is not None: return_value = rpc_types.to_outgoing_proto( fi.return_type, call_result) return protos.StreamingMessage( request_id=self.request_id, invocation_response=protos.InvocationResponse( invocation_id=invocation_id, return_value=return_value, result=protos.StatusResult( status=protos.StatusResult.Success), output_data=output_data)) except Exception as ex: return protos.StreamingMessage( request_id=self.request_id, invocation_response=protos.InvocationResponse( invocation_id=invocation_id, result=protos.StatusResult( status=protos.StatusResult.Failure, exception=self._serialize_exception(ex)))) def __run_sync_func(self, invocation_id, func, params): # This helper exists because we need to access the current # invocation_id from ThreadPoolExecutor's threads. 
_invocation_id_local.v = invocation_id try: return func(**params) finally: _invocation_id_local.v = None def __poll_grpc(self): channel = grpc.insecure_channel(f'{self._host}:{self._port}') try: grpc.channel_ready_future(channel).result( timeout=self._grpc_connect_timeout) except Exception as ex: self._loop.call_soon_threadsafe( self._grpc_connected_fut.set_exception, ex) return else: self._loop.call_soon_threadsafe( self._grpc_connected_fut.set_result, True) stub = protos.FunctionRpcStub(channel) def gen(resp_queue): while True: msg = resp_queue.get() if msg is self._GRPC_STOP_RESPONSE: grpc_req_stream.cancel() return yield msg grpc_req_stream = stub.EventStream(gen(self._grpc_resp_queue)) try: for req in grpc_req_stream: self._loop.call_soon_threadsafe( self._loop.create_task, self._dispatch_grpc_request(req)) except Exception as ex: if ex is grpc_req_stream: # Yes, this is how grpc_req_stream iterator exits. return raise class AsyncLoggingHandler(logging.Handler): def emit(self, record): Dispatcher.current._on_logging(record) class ContextEnabledTask(asyncio.Task): _AZURE_INVOCATION_ID = '__azure_function_invocation_id__' def __init__(self, coro, loop): super().__init__(coro, loop=loop) current_task = asyncio.Task.current_task(loop) if current_task is not None: invocation_id = getattr( current_task, self._AZURE_INVOCATION_ID, None) if invocation_id is not None: self.set_azure_invocation_id(invocation_id) def set_azure_invocation_id(self, invocation_id): setattr(self, self._AZURE_INVOCATION_ID, invocation_id) def get_current_invocation_id(): loop = asyncio._get_running_loop() if loop is not None: current_task = asyncio.Task.current_task(loop) if current_task is not None: return getattr( current_task, ContextEnabledTask._AZURE_INVOCATION_ID, None) return getattr(_invocation_id_local, 'v', None) _invocation_id_local = threading.local()
workflows_scaling.py
#!/usr/bin/env python
"""A small script to drive workflow performance testing.

% ./test/manual/launch_and_run.sh workflows_scaling --collection_size 500 --workflow_depth 4

$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Workflow step'
$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Created step'
"""
import functools
import json
import os
import random
import sys
from argparse import ArgumentParser
from threading import Thread
from uuid import uuid4

from bioblend import galaxy
from gxformat2 import python_to_workflow

# Make the Galaxy lib/ and test/ trees importable before the populator import.
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path[1:1] = [os.path.join(galaxy_root, "lib"), os.path.join(galaxy_root, "test")]

from galaxy_test.base.populators import (
    GiDatasetCollectionPopulator,
    GiDatasetPopulator,
    GiWorkflowPopulator,
)

# Effectively "wait forever" — scaling runs can take a very long time.
LONG_TIMEOUT = 1000000000
DESCRIPTION = "Script to exercise the workflow engine."


def main(argv=None):
    """Entry point for workflow driving."""
    arg_parser = ArgumentParser(description=DESCRIPTION)
    arg_parser.add_argument("--api_key", default="testmasterapikey")
    arg_parser.add_argument("--host", default="http://localhost:8080/")
    arg_parser.add_argument("--collection_size", type=int, default=20)
    arg_parser.add_argument("--schedule_only_test", default=False, action="store_true")
    arg_parser.add_argument("--workflow_depth", type=int, default=10)
    arg_parser.add_argument("--workflow_count", type=int, default=1)
    group = arg_parser.add_mutually_exclusive_group()
    group.add_argument("--two_outputs", default=False, action="store_true")
    group.add_argument("--wave_simple", default=False, action="store_true")
    args = arg_parser.parse_args(argv)

    uuid = str(uuid4())
    workflow_struct = _workflow_struct(args, uuid)
    # Only workflow variants with an explicit input collection step need the
    # uuid label; otherwise no input mapping is supplied at invoke time.
    has_input = any(s.get("type", "tool") == "input_collection" for s in workflow_struct)
    if not has_input:
        uuid = None
    gi = _gi(args)

    workflow = python_to_workflow(workflow_struct)
    workflow_info = gi.workflows.import_workflow_json(workflow)
    workflow_id = workflow_info["id"]

    # Fire off workflow_count parallel invocations, then wait for them all.
    target = functools.partial(_run, args, gi, workflow_id, uuid)
    threads = []
    for _ in range(args.workflow_count):
        t = Thread(target=target)
        t.daemon = True
        t.start()
        threads.append(t)

    for t in threads:
        t.join()


def _run(args, gi, workflow_id, uuid):
    """Invoke the workflow once in a fresh history and wait for completion."""
    dataset_populator = GiDatasetPopulator(gi)
    dataset_collection_populator = GiDatasetCollectionPopulator(gi)

    history_id = dataset_populator.new_history()

    if uuid is not None:
        # Build the input collection referenced by the workflow's input step.
        contents = []
        for i in range(args.collection_size):
            contents.append("random dataset number #%d" % i)
        hdca = dataset_collection_populator.create_list_in_history(history_id, contents=contents).json()
        label_map = {
            uuid: {"src": "hdca", "id": hdca["id"]},
        }
    else:
        label_map = {}

    workflow_request = dict(
        history="hist_id=%s" % history_id,
    )
    workflow_request["inputs"] = json.dumps(label_map)
    # NOTE(review): "usage" is the legacy invoke endpoint name — confirm the
    # target Galaxy version still serves it.
    url = "workflows/%s/usage" % (workflow_id)
    invoke_response = dataset_populator._post(url, data=workflow_request).json()

    invocation_id = invoke_response["id"]
    workflow_populator = GiWorkflowPopulator(gi)
    if args.schedule_only_test:
        # Wait only for all steps to be scheduled, not for jobs to finish.
        workflow_populator.wait_for_invocation(
            workflow_id,
            invocation_id,
            timeout=LONG_TIMEOUT,
        )
    else:
        workflow_populator.wait_for_workflow(
            workflow_id,
            invocation_id,
            history_id,
            timeout=LONG_TIMEOUT,
        )


def _workflow_struct(args, input_uuid):
    """Select one of the three workflow shapes based on CLI flags."""
    if args.two_outputs:
        return _workflow_struct_two_outputs(args, input_uuid)
    elif args.wave_simple:
        return _workflow_struct_wave(args, input_uuid)
    else:
        return _workflow_struct_simple(args, input_uuid)


def _workflow_struct_simple(args, input_uuid):
    """Linear chain: collection creator -> cat -> cat -> ... (depth steps)."""
    workflow_struct = [
        {"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
        {"tool_id": "cat", "state": {"input1": _link(0, "output")}},
    ]

    workflow_depth = args.workflow_depth
    for i in range(workflow_depth):
        link = str(i + 1) + "#out_file1"
        workflow_struct.append({"tool_id": "cat", "state": {"input1": _link(link)}})
    return workflow_struct


def _workflow_struct_two_outputs(args, input_uuid):
    """Chain where every step feeds both of its outputs to the next step."""
    workflow_struct = [
        {"type": "input_collection", "uuid": input_uuid},
        {"tool_id": "cat", "state": {"input1": _link(0), "input2": _link(0)}},
    ]

    workflow_depth = args.workflow_depth
    for i in range(workflow_depth):
        link1 = str(i + 1) + "#out_file1"
        link2 = str(i + 1) + "#out_file2"
        workflow_struct.append({"tool_id": "cat", "state": {"input1": _link(link1), "input2": _link(link2)}})
    return workflow_struct


def _workflow_struct_wave(args, input_uuid):
    """Alternate collapse (cat_list) and expand (split) steps — a "wave"."""
    workflow_struct = [
        {"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
        {"tool_id": "cat_list", "state": {"input1": _link(0, "output")}},
    ]

    workflow_depth = args.workflow_depth
    for i in range(workflow_depth):
        step = i + 2
        if step % 2 == 1:
            workflow_struct += [{"tool_id": "cat_list", "state": {"input1": _link(step - 1, "output")}}]
        else:
            workflow_struct += [{"tool_id": "split", "state": {"input1": _link(step - 1, "out_file1")}}]

    return workflow_struct


def _link(link, output_name=None):
    """Build a gxformat2 $link reference, optionally qualified by output."""
    if output_name is not None:
        link = str(link) + "#" + output_name
    return {"$link": link}


def _gi(args):
    """Create a per-run Galaxy user and return a GalaxyInstance bound to it."""
    gi = galaxy.GalaxyInstance(args.host, key=args.api_key)
    name = "wftest-user-%d" % random.randint(0, 1000000)

    user = gi.users.create_local_user(name, "%s@galaxytesting.dev" % name, "pass123")
    user_id = user["id"]
    api_key = gi.users.create_user_apikey(user_id)
    user_gi = galaxy.GalaxyInstance(args.host, api_key)
    return user_gi


if __name__ == "__main__":
    main()
syncMove.py
#!/usr/bin/python3
# Directory sync/move tool: mirrors top-level (and one level of child)
# directories from SOURCE into DEST, optionally listing or trashing
# duplicates.  Flat script — runs top to bottom.
import os
import sys
import argparse
import pathlib
import shutil
import time
import datetime
import threading

# Draw a horizontal rule the width of the terminal.
for c in range((shutil.get_terminal_size().columns-1)):
    print('-', end='')
print('')

parser = argparse.ArgumentParser(
    prog=os.path.basename(__file__),
    description=''
)
parser.add_argument('--debug',
    help='Turn on of the debug mode.',
    action='store_true',
)
parser.add_argument('--trash',
    help='Trash dir.',
    nargs=1,
    default=None,
    metavar='TRASH',
)
parser.add_argument('--duplicatecsv',
    help='Generate list a Duplicated Dirs.',
    action='store_true',
    default=False,
)
parser.add_argument('dirSrc',
    nargs=1,
    default=None,
    metavar='SOURCE',
    help='',
)
parser.add_argument('dirDst',
    nargs=1,
    default=None,
    metavar='DEST',
    help='',
)
args = parser.parse_args()

# Validate SOURCE: must exist and be a directory.
# Is Exist
dirSrc = str(args.dirSrc[0])
if not pathlib.Path(str(dirSrc)).exists():
    print('[ERROR]>> No such a directory.')
    print(' ' + str(dirSrc))
    parser.print_usage()
    sys.exit(1)
# Is Dir
if not pathlib.Path(str(dirSrc)).is_dir():
    print('[ERROR]>> Is a not directory.')
    print(' ' + str(dirSrc))
    parser.print_usage()
    sys.exit(1)

# Validate DEST the same way.
dirDst = str(args.dirDst[0])
# Is Exist
if not pathlib.Path(str(dirDst)).exists():
    print('[ERROR]>> No such a directory.')
    print(' ' + str(dirDst))
    parser.print_usage()
    sys.exit(1)
# Is Dir
if not pathlib.Path(str(dirDst)).is_dir():
    print('[ERROR]>> Is a not directory.')
    print(' ' + str(dirDst))
    parser.print_usage()
    sys.exit(1)

# Optional trash directory — validated only when --trash is given.
dirRecycleBin = None
if(args.trash is not None):
    dirRecycleBin = str(args.trash[0])
    # Is Exist
    if not pathlib.Path(str(dirRecycleBin)).exists():
        print('[ERROR]>> No such a directory.')
        print(' ' + str(dirRecycleBin))
        parser.print_usage()
        sys.exit(1)
    # Is Dir
    if not pathlib.Path(str(dirRecycleBin)).is_dir():
        print('[ERROR]>> Is a not directory.')
        print(' ' + str(dirRecycleBin))
        parser.print_usage()
        sys.exit(1)
    print(' !!>> --------------- <<!! ' + '')
    print(' !!>> --trash is On <<!! ' + '')
    print(' !!>> --------------- <<!! ' + '')
    print(str(dirRecycleBin))
    print('\n')

if(args.duplicatecsv is True):
    print(' !!>> ---------------------- <<!! ' + '')
    print(' !!>> --duplicatecsv is On <<!! ' + '')
    print(' !!>> ---------------------- <<!! ' + '')
    print('\n')

print('Src: ' + str(pathlib.Path(str(dirSrc)).resolve()))
print('Dst: ' + str(pathlib.Path(str(dirDst)).resolve()))


def tran(srcDirs, dirDst):
    """Move srcDirs into dirDst (thread target around shutil.move)."""
    # NOTE(review): every caller starts this on a thread and immediately
    # join()s it, so there is no actual concurrency — presumably a leftover
    # from an async design; confirm before relying on parallelism.
    shutil.move(str(srcDirs), str(dirDst))


def elapsed_timer():
    # NOTE(review): unused stub — never called anywhere in this script.
    pass


try:
    # Interactive confirmation before any filesystem mutation.
    run3 = str(input('Is this OK [y/N]: '))
    run3 = run3.lower().strip()
    if run3 != 'y' and run3 != 'yes' :
        sys.exit(1)

    for srcDirs in (pathlib.Path(str(dirSrc)).iterdir()):
        #print(str(srcDirs) + ': ', end='')
        findDir = False
        findChildrenDir = False
        procTimer = datetime.datetime.now()
        transfer = ''
        if not srcDirs.is_dir():
            continue
        # Does the mirrored top-level directory already exist in DEST?
        findDir = pathlib.Path(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc))).exists()
        if findDir != True:
            os.makedirs(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc)))
        # Dead branch (disabled at commit cb94eaa…): used to move the whole
        # top-level directory in one shot instead of per-child below.
        if False: # cb94eaa2c2b7a439fcb0bb4f66a4204c704ba282
            if args.debug == True:
                logg_time=datetime.datetime.now()
                print(str(logg_time.hour).zfill(2) + ':' + str(logg_time.minute).zfill(2) + ':' + str(logg_time.second).zfill(2) + '.' + '{:0<3}'.format(int(logg_time.microsecond/1000)) + ' ', end='')
            print('Transferring... \'' + str(srcDirs.relative_to(dirSrc)) + '\'')
            if args.debug == True:
                print(' ' + 'From ' + ': ' + str(srcDirs) )
                print(' ' + 'To ' + ': ' + str(dirDst) )
            if(args.duplicatecsv is not True):
                if __name__ == '__main__':
                    thread1 = threading.Thread(target=tran, kwargs={'srcDirs': str(srcDirs), 'dirDst': str(dirDst)})
                    thread1.start()
                    thread1.join()
            if args.debug == True:
                print(' ' + 'Elapsed ' + ': ' + str((datetime.datetime.now() - procTimer).seconds) + 's' )
            continue
        else:
            # Per-child processing: move missing children, compare existing
            # ones by file count, optionally CSV-list or trash duplicates.
            for srcChildrenDirs in (pathlib.Path(str(srcDirs)).iterdir()):
                procTimer = datetime.datetime.now()
                if not srcChildrenDirs.is_dir():
                    continue
                findChildrenDir = pathlib.Path(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' + str(srcChildrenDirs.relative_to(dirSrc + '/' + str(srcDirs.relative_to(dirSrc)) ))).exists()
                if findChildrenDir != True:
                    # Child missing in DEST -> move it across.
                    if(args.duplicatecsv is not True and args.debug == True):
                        logg_time=datetime.datetime.now()
                        print(str(logg_time.hour).zfill(2) + ':' + str(logg_time.minute).zfill(2) + ':' + str(logg_time.second).zfill(2) + '.' + '{:0<3}'.format(int(logg_time.microsecond/1000)) + ' ', end='')
                    if(args.duplicatecsv is not True):
                        print('Transferring... \'' + str(srcChildrenDirs.relative_to(dirSrc)) + '\'')
                    if(args.duplicatecsv is not True and args.debug == True):
                        print(' ' + 'From ' + ': ' + str(srcChildrenDirs) )
                        print(' ' + 'To ' + ': ' + str(pathlib.Path(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' + str(srcChildrenDirs.relative_to(dirSrc + '/' + str(srcDirs.relative_to(dirSrc)) )))) )
                    if(args.duplicatecsv is not True):
                        if __name__ == '__main__':
                            thread1 = threading.Thread(target=tran, kwargs={'srcDirs': str(srcChildrenDirs), 'dirDst': str(pathlib.Path(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' + str(srcChildrenDirs.relative_to(dirSrc + '/' + str(srcDirs.relative_to(dirSrc)) ))))})
                            thread1.start()
                            thread1.join()
                    if(args.duplicatecsv is not True and args.debug == True):
                        print(' ' + 'Elapsed ' + ': ' + str((datetime.datetime.now() - procTimer).seconds) + 's' )
                else:
                    # Child already exists in DEST -> report, then compare.
                    if(args.duplicatecsv is not True and args.debug == True):
                        logg_time=datetime.datetime.now()
                        print(str(logg_time.hour).zfill(2) + ':' + str(logg_time.minute).zfill(2) + ':' + str(logg_time.second).zfill(2) + '.' + '{:0<3}'.format(int(logg_time.microsecond/1000)) + ' ', end='')
                    if(args.duplicatecsv is not True):
                        print('Skipping....... \'' + str(srcChildrenDirs.relative_to(dirSrc)) + '\'')
                    if(args.duplicatecsv is True or args.debug == True):
                        if(args.duplicatecsv is not True):
                            print(' ' + 'File count check: ',end='')
                        # Compare Src/Dst by counting regular files directly
                        # inside each (not recursive).
                        tmp_internal_filesCount=[0, 0];
                        for tmp_dirpath in [
                                [ 'Src', str(srcChildrenDirs), 0, ],
                                [ 'Dst', str(pathlib.Path(str(dirDst) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' + str(srcChildrenDirs.relative_to(dirSrc + '/' + str(srcDirs.relative_to(dirSrc)) )))), 0, ],
                            ] :
                            tmp_dirpath[2]=sum(os.path.isfile(os.path.join(str(tmp_dirpath[1]), name)) for name in os.listdir(str(tmp_dirpath[1])))
                            if tmp_dirpath[0]=='Src':
                                tmp_internal_filesCount=[tmp_dirpath[2], 0]
                            else:
                                if tmp_dirpath[0]=='Dst':
                                    tmp_internal_filesCount[1]=tmp_dirpath[2]
                        if tmp_internal_filesCount[0]==tmp_internal_filesCount[1]:
                            # Same file count -> treated as duplicate.
                            if(args.duplicatecsv is not True):
                                print('The same as destination.')
                            else:
                                # CSV row: timestamp, posix path, windows path.
                                logg_time=datetime.datetime.now()
                                print(str(logg_time.hour).zfill(2) + ':' + str(logg_time.minute).zfill(2) + ':' + str(logg_time.second).zfill(2) + '.' + '{:0<3}'.format(int(logg_time.microsecond/1000)) + '', end='')
                                print(',', end='');
                                print('"' + str(srcChildrenDirs.relative_to(dirSrc)) + '"', end='');
                                print(',', end='');
                                print('"' + str(srcChildrenDirs.relative_to(dirSrc)).replace('/','\\') + '"', end='');
                                print('');
                            if(args.duplicatecsv is not True and args.trash is not None):
                                # Duplicate + --trash -> move the source copy
                                # into the recycle bin.
                                print(' Transferring... \'' + str(srcChildrenDirs.relative_to(dirSrc)) + '\'')
                                print(' To \'' + str(dirRecycleBin) + '\'')
                                if __name__ == '__main__':
                                    thread1 = threading.Thread(target=tran, kwargs={'srcDirs': str(srcChildrenDirs), 'dirDst': str(pathlib.Path(str(dirRecycleBin) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' ))})
                                    thread1.start()
                                    thread1.join()
                        else:
                            # Different file counts -> not a duplicate.
                            if(args.duplicatecsv is not True):
                                print('There isn\'t same as destination. ')
                                print(' ' + 'Src: '+str(tmp_internal_filesCount[0]))
                                print(' ' + 'Dst: '+str(tmp_internal_filesCount[1]))
                            if(args.duplicatecsv is not True and args.trash is not None):
                                # Source smaller than destination -> source is
                                # the stale copy; trash it.
                                if( tmp_internal_filesCount[0] < tmp_internal_filesCount[1] ):
                                    print(' Transferring... \'' + str(srcChildrenDirs.relative_to(dirSrc)) + '\'')
                                    print(' To \'' + str(dirRecycleBin) + '\'')
                                    if __name__ == '__main__':
                                        thread1 = threading.Thread(target=tran, kwargs={'srcDirs': str(srcChildrenDirs), 'dirDst': str(pathlib.Path(str(dirRecycleBin) + '/' + str(srcDirs.relative_to(dirSrc)) + '/' ))})
                                        thread1.start()
                                        thread1.join()
        # Remove the (now possibly empty) source directory; non-empty dirs
        # simply fail and are kept.
        try:
            os.rmdir(str(srcDirs))
        except OSError as e:
            pass
except KeyboardInterrupt:
    print('^C')
    sys.exit(1)
app.py
# Flask word-cloud experiment app.  Reference links kept from the author:
# https://stackoverflow.com/questions/25149493/how-to-call-another-webservice-api-from-flask
# https://thispointer.com/python-three-ways-to-check-if-a-file-is-empty/
# https://www.geeksforgeeks.org/how-to-create-a-pop-up-message-when-a-button-is-pressed-in-python-tkinter/
# https://stackoverflow.com/questions/23112316/using-flask-how-do-i-modify-the-cache-control-header-for-all-output
# https://dbader.org/blog/python-check-if-file-exists#:~:text=The%20most%20common%20way%20to%20check%20for%20the,search%20engine%20on%20how%20to%20solve%20this%20problem.
# fixing word cloud multi thread issues:
# https://www.shanelynn.ie/using-python-threading-for-multiple-results-queue/
# https://stackoverflow.com/questions/31264826/start-a-flask-application-in-separate-thread
# https://izziswift.com/start-a-flask-application-in-separate-thread/
# other changes
# https://stackoverflow.com/questions/29104107/upload-image-using-post-form-data-in-python-requests
# https://stackoverflow.com/questions/29104107/upload-image-using-post-form-data-in-python-requests
# https://stackoverflow.com/questions/55265779/how-to-jsonify-a-picture-in-flask
# http://whitenoise.evans.io/en/stable/flask.html
import os
import requests
import os.path
import matplotlib
matplotlib.use('Agg')  # headless backend — must be set before pyplot import
import matplotlib.pyplot as plt
import requests
from flask import Flask, jsonify, render_template, redirect, url_for, request, abort
import flask_restful
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from urllib.request import urlopen
from flask import flash
import threading
import base64
import io
import logging
import numpy as np
from PIL import Image
from io import BytesIO
import webbrowser
import PIL.Image

data = 'foo'

from forms import runWordCloudForm, AddGameForm, AddGenreForm, AddCreatorForm, AddPlatformForm, \
    AddEpisodeForm, AddToM2MPlatformGame, \
    EditTheGame, SearchForm, SearchForm2, RemoveGame, RemoveGenre, RemoveCreator, \
    RemovePlatform, RemoveEpisode, RemoveGameAndPlatform, SearchPageForm

#application flask run
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 300
app.config['SECRET_KEY'] = 'oTv!5ox8LB#A&@cBHpa@onsKU'


def random_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None):
    """WordCloud color callback: fixed hue/saturation, randomized lightness."""
    h = int(360.0 * 45.0 / 255.0)
    s = int(100.0 * 255.0 / 255.0)
    l = int(100.0 * float(random_state.randint(60, 120)) / 255.0)
    return "hsl({}, {}%, {}%)".format(h, s, l)


# NOTE(review): three view functions (index, cloudMake, and main at the
# bottom) are all mapped to '/'; only one can actually serve that rule —
# confirm which is intended and remove the others.
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page; probes whether game.json is readable first."""
    #flash('in the index')
    try:
        f = open('game.json')
        f.close()
    except IOError:
        print('File is not accessible')
    print('File is accessible')
    return render_template('index.html')


@app.route('/', methods=['GET', 'POST'])
def cloudMake():
    print('in cloudMake')
    flash('Word Cloud successfully created')
    return redirect(url_for('index'))


@app.route('/SomeFunction', methods=['POST', 'GET'])
def SomeFunction():
    """Build a word cloud from two local JSON files and save it as PNG."""
    print('In SomeFunction')
    print("inside if")
    file_content = 0
    file_content2 = 0
    # NOTE(review): these file handles are never closed — leak on every hit.
    file_content = open("game.json").read()
    file_content2 = open("pokemonSnap_keywords.json").read()
    file_content += file_content2
    print("FileContent")
    wordcloud = WordCloud(
        stopwords=STOPWORDS,
        background_color='white',
        width=1200,
        height=1000,
        color_func=random_color_func
    ).generate(file_content)
    plt.imshow(wordcloud)
    plt.axis('off')
    # plt.show()
    # saves picture file to picture format
    plt.savefig('static/wordCloud.png')
    print("wordCloud.png created")
    flash('Success! Word Cloud has been processed and is loading')
    return redirect(url_for('wordcloud'))


#this needs to be the landing page for the word cloud- this is where the user hits the "submit" button
@app.route("/wordcloud", methods=['POST', 'GET'])
def wordcloud():
    flash('Welcome!')
    return render_template('wordcloud.html')


r = ""


@app.route("/getValerie", methods=['POST', 'GET'])
def getValerie():
    #update the following web address to whatever team members web address will be
    # NOTE(review): this returns a requests.Response object from a Flask
    # view, which Flask cannot serialize — confirm intended behavior.
    r = requests.get("http://127.0.0.4:80/wordcloud")
    return(r)


@app.route("/wordcloud2", methods=['POST', 'GET'])
def wordcloud2():
    """Fetch remote JSON and render a word cloud from its text."""
    #need to update the code below to something along the lines of the web address access
    #perhaps something of "did beavis work"
    try:
        #f = open('game.json')
        #f.close()
        #webbrowser.open('https://1drv.ms/u/s!AlvZSaPdNhyBtYwpp3k3WhJxctY2qw?e=DETmKW')
        #requests.get('https://pastebin.com/raw/W2ez0StJ')
        requests.get('http://valchin.com/sendjson2021')
    #when error happens then flashing this error will be helpful
    except IOError:
        print('File is not accessible')
        flash('Files not found or readable. One or more required scraper files (game.json as example) not available - please fix')
        return render_template('wordcloud.html')
    print('File is accessible')
    flash('You created a word cloud')
    #Need to update this to the proper web address for the word cloud
    #beavis = requests.get('http://127.0.0.4/wordcloud')
    #beavis = requests.get('https://pastebin.com/raw/W2ez0StJ')
    beavis = requests.get('http://valchin.com/sendjson2021')
    print("out of beavis")
    #Discovered this after way too long - this forces the text out of beavis and into
    #a format that the cloud generator can run
    file_content = beavis.text
    # literally exists out of total paranoia - shows the text
    print(file_content)
    print('out of file_content')
    print("FileContent")
    #this section generates the word cloud
    wordcloud = WordCloud(
        stopwords=STOPWORDS,
        background_color='white',
        width=1200,
        height=1000,
        color_func=random_color_func
    ).generate(file_content)
    plt.imshow(wordcloud)
    plt.axis('off')
    # plt.show()
    # saves picture file to picture format
    plt.savefig('static/wordCloud.png')
    print("wordCloud.png created")
    flash('Success! Word Cloud has been processed and is loading')
    return render_template('wordcloud2.html')


#
# @app.route("/uploadImage", methods=["POST", "GET"])
def upload_image():
    """Accept an uploaded image and report its dimensions (route disabled)."""
    print('in upload image')
    file = request.files['static/wordCloud']
    print('after file')
    # Read the image via file.stream
    img = Image.open(file.stream)
    print(img)
    return jsonify({'msg': 'success', 'size': [img.width, img.height]})


# this never worked - probably should consider deleting this.
@app.route("/test", methods=['POST', 'GET'])
def test_method():
    """Decode a base64 image posted as JSON into a numpy array."""
    # print(request.json)
    print('in test')
    if not request.json or 'static/wordCloud.png' not in request.json:
        print('it blew up')
        abort(400)
    print('avoided if not')
    # get the base64 encoded string
    im_b64 = request.json['static/wordCloud.png']
    # convert it into bytes
    img_bytes = base64.b64decode(im_b64.encode('utf-8'))
    # convert bytes data to PIL Image object
    img = Image.open(io.BytesIO(img_bytes))
    # PIL image object to numpy array
    img_arr = np.asarray(img)
    print('img shape', img_arr.shape)
    # process your img_arr here
    # access other keys of json
    # print(request.json['other_key'])
    result_dict = {'output': 'output_key'}
    return result_dict


# this is the only word cloud get method that works
@app.route('/wordcloud66', methods=['POST', 'GET'])
def wordcloudGet66():
    """Return the generated word-cloud PNG as a base64 string."""
    try:
        #f = open('game.json')
        #webbrowser.get('http://127.0.0.4/wordcloud')
        #requests.get('http://127.0.0.4/wordcloud')
        requests.get('http://valchin.com/sendjson2021')
        #f.close()
    #when error happens then flashing this error will be helpful
    except IOError:
        print('File is not accessible')
        flash('picture file not found')
        return ('File is not accessible')
    print('pre file content opening of word cloud')
    # NOTE(review): this handle is never closed; the with-block below opens
    # the same file again and is the one actually used.
    file_content = open("static/wordCloud.png", 'rb')
    with open('static/wordCloud.png', 'rb') as image_file:
        print('file_content')
        encoded_string = base64.b64encode(image_file.read())
        print('file content created')
    print('checking if file can be written')
    #image decoding from recent encoding - this is to prove that
    #encoded string will actually return back to the original picture
    newImage = Image.open(BytesIO(base64.b64decode(encoded_string)))
    print('decode workie?')
    print("test")
    #image is successfully printed to static folder proving that data can be decoded
    newImage.save('static/noob.png', 'PNG')
    print('possible print')
    return (encoded_string)


#this doesn't work probably delete
@app.route('/sendWordCloud', methods=['POST', 'GET'])
def sendWordCloud():
    print("inside sendWordCloud")
    beavis = requests.post('http://127.0.0.1:5000/static/wordCloud.png')
    print(beavis)
    file_content = beavis.request
    print(file_content)
    return render_template(file_content)


#this doesn't work probably delete
@app.route('/sendWordCloud2', methods=['POST', 'GET'])
def sendWordCloud2():
    print("inside sendWordCloud")
    image_file_descriptor = open('static/wordCloud.png', 'rb')
    # Requests makes it simple to upload Multipart-encoded files
    files = {'media': image_file_descriptor}
    url = 'http://127.0.0.1:5000/static/wordCloud.png'
    beavis = requests.post(url, files=files)
    image_file_descriptor.close()
    return (beavis)


#old app run - doesn't work now- need to base it on threading environment
#if __name__ == '__main__':
#    app.run()

@app.route("/")
def main():
    return data


if __name__ == "__main__":
    # Run the Flask server on a background thread (see links in header).
    threading.Thread(target=app.run).start()
pykms_Misc.py
#!/usr/bin/env python3 from __future__ import print_function import sys import logging import os import argparse from logging.handlers import RotatingFileHandler from pykms_Format import ColorExtraMap, ShellMessage, pretty_printer #------------------------------------------------------------------------------------------------------------------------------------------------------------ # https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility # https://stackoverflow.com/questions/17558552/how-do-i-add-custom-field-to-python-log-format-string # https://stackoverflow.com/questions/1343227/can-pythons-logging-format-be-modified-depending-on-the-message-log-level # https://stackoverflow.com/questions/14844970/modifying-logging-message-format-based-on-message-logging-level-in-python3 def add_logging_level(levelName, levelNum, methodName = None): """ Adds a new logging level to the `logging` module and the currently configured logging class. `levelName` becomes an attribute of the `logging` module with the value `levelNum`. `methodName` becomes a convenience method for both `logging` itself and the class returned by `logging.getLoggerClass()` (usually just `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is used. To avoid accidental clobberings of existing attributes, this method will raise an `AttributeError` if the level name is already an attribute of the `logging` module or if the method name is already present . 
Example ------- >>> add_logging_level('TRACE', logging.DEBUG - 5) >>> logging.getLogger(__name__).setLevel("TRACE") >>> logging.getLogger(__name__).trace('that worked') >>> logging.trace('so did this') >>> logging.TRACE 5 """ if not methodName: methodName = levelName.lower() if hasattr(logging, levelName) or hasattr(logging, methodName) or hasattr(logging.getLoggerClass(), methodName): return def logForLevel(self, message, *args, **kwargs): if self.isEnabledFor(levelNum): self._log(levelNum, message, args, **kwargs) def logToRoot(message, *args, **kwargs): logging.log(levelNum, message, *args, **kwargs) logging.addLevelName(levelNum, levelName) setattr(logging, levelName, levelNum) setattr(logging.getLoggerClass(), methodName, logForLevel) setattr(logging, methodName, logToRoot) class LevelFormatter(logging.Formatter): dfmt = '%a, %d %b %Y %H:%M:%S' default_fmt = logging.Formatter('%(message)s', datefmt = dfmt) def __init__(self, formats, color = False): """ `formats` is a dict { loglevel : logformat } """ self.formatters = {} for loglevel in formats: if color: frmt = self.colorize(formats, loglevel) formats[loglevel] = frmt.format(**ColorExtraMap) self.formatters[loglevel] = logging.Formatter(formats[loglevel], datefmt = self.dfmt) def colorize(self, formats, loglevel): if loglevel == logging.MINI: frmt = '{gray}' + formats[loglevel] + '{end}' elif loglevel == logging.CRITICAL: frmt = '{magenta}{bold}' + formats[loglevel] + '{end}' elif loglevel == logging.ERROR: frmt = '{red}{bold}' + formats[loglevel] + '{end}' elif loglevel == logging.WARNING: frmt = '{yellow}{bold}' + formats[loglevel] + '{end}' elif loglevel == logging.INFO: frmt = '{cyan}' + formats[loglevel] + '{end}' elif loglevel == logging.DEBUG: frmt = '{green}' + formats[loglevel] + '{end}' else: frmt = '{end}' + formats[loglevel] + '{end}' return frmt def format(self, record): formatter = self.formatters.get(record.levelno, self.default_fmt) return formatter.format(record) # based on 
https://github.com/jruere/multiprocessing-logging (license LGPL-3.0) from multiprocessing import Queue as MPQueue try: # Python 2.x imports import Queue as Queue except ImportError: # Python 3.x imports import queue as Queue import threading class MultiProcessingLogHandler(logging.Handler): def __init__(self, name, handler = None): super(MultiProcessingLogHandler, self).__init__() self.queue = MPQueue(-1) if handler is None: handler = logging.StreamHandler() self.handler = handler self.name = handler.name self.setLevel(self.handler.level) self.setFormatter(self.handler.formatter) self.filters = self.handler.filters self.is_closed = False self.receive_thread = threading.Thread(target = self.receive, name = name) self.receive_thread.daemon = True self.receive_thread.start() def setFormatter(self, fmt): super(MultiProcessingLogHandler, self).setFormatter(fmt) self.handler.setFormatter(fmt) def emit(self, record): try: if record.args: record.msg = record.msg %record.args record.args = None if record.exc_info: dummy = self.format(record) record.exc_info = None self.queue.put_nowait(record) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) def receive(self): while not (self.is_closed and self.queue.empty()): try: record = self.queue.get(timeout = 0.2) self.handler.emit(record) except (KeyboardInterrupt, SystemExit): raise except EOFError: break except Queue.Empty: pass except: logging.exception('Error in log handler.') self.queue.close() self.queue.join_thread() def close(self): if not self.is_closed: self.is_closed = True self.receive_thread.join(5.0) self.handler.close() super(MultiProcessingLogHandler, self).close() def logger_create(log_obj, config, mode = 'a'): # Create new level. add_logging_level('MINI', logging.CRITICAL + 10) log_handlers = [] # Configure visualization. 
if any(opt in ['STDOUT', 'FILESTDOUT', 'STDOUTOFF'] for opt in config['logfile']): if any(opt in ['STDOUT', 'FILESTDOUT'] for opt in config['logfile']): # STDOUT or FILESTDOUT. hand_stdout = logging.StreamHandler(sys.stdout) hand_stdout.name = 'LogStdout' log_handlers.append(hand_stdout) if any(opt in ['STDOUTOFF', 'FILESTDOUT'] for opt in config['logfile']): # STDOUTOFF or FILESTDOUT. hand_rotate = RotatingFileHandler(filename = config['logfile'][1], mode = mode, maxBytes = int(config['logsize'] * 1024 * 512), backupCount = 1, encoding = None, delay = 0) hand_rotate.name = 'LogRotate' log_handlers.append(hand_rotate) elif 'FILEOFF' in config['logfile']: hand_null = logging.FileHandler(os.devnull) hand_null.name = 'LogNull' log_handlers.append(hand_null) else: # FILE. hand_rotate = RotatingFileHandler(filename = config['logfile'][0], mode = mode, maxBytes = int(config['logsize'] * 1024 * 512), backupCount = 1, encoding = None, delay = 0) hand_rotate.name = 'LogRotate' log_handlers.append(hand_rotate) # Configure formattation. try: levelnames = logging._levelToName except AttributeError: levelnames = logging._levelNames levelnum = [k for k in levelnames if k != 0] frmt_gen = '%(asctime)s %(levelname)-8s %(message)s' frmt_std = '%(name)s %(asctime)s %(levelname)-8s %(message)s' frmt_min = '[%(asctime)s] [%(levelname)-8s] %(host)s %(status)s %(product)s %(message)s' def apply_formatter(levelnum, formats, handler, color = False): levelformdict = {} for num in levelnum: if num != logging.CRITICAL + 10: levelformdict[num] = formats[0] else: levelformdict[num] = formats[1] handler.setFormatter(LevelFormatter(levelformdict, color = color)) return handler # Clear old handlers. 
if log_obj.handlers: log_obj.handlers = [] for log_handler in log_handlers: log_handler.setLevel(config['loglevel']) if log_handler.name in ['LogStdout']: log_handler = apply_formatter(levelnum, (frmt_std, frmt_min), log_handler, color = True) elif log_handler.name in ['LogRotate']: log_handler = apply_formatter(levelnum, (frmt_gen, frmt_min), log_handler) # Attach. if config['asyncmsg']: log_obj.addHandler(MultiProcessingLogHandler('Thread-AsyncMsg{0}'.format(log_handler.name), handler = log_handler)) else: log_obj.addHandler(log_handler) log_obj.setLevel(config['loglevel']) #------------------------------------------------------------------------------------------------------------------------------------------------------------ def check_logfile(optionlog, defaultlog, where): if not isinstance(optionlog, list): optionlog = [optionlog] lenopt = len(optionlog) msg_dir = "{reverse}{red}{bold}argument `-F/--logfile`: invalid directory: '%s'. Exiting...{end}" msg_long = "{reverse}{red}{bold}argument `-F/--logfile`: too much arguments. Exiting...{end}" msg_log = "{reverse}{red}{bold}argument `-F/--logfile`: not a log file, invalid extension: '%s'. Exiting...{end}" def checkdir(path): filename = os.path.basename(path) pathname = os.path.dirname(path) if not os.path.isdir(pathname): if path.count('/') == 0: pathname = filename pretty_printer(put_text = msg_dir %pathname, where = where, to_exit = True) elif not filename.lower().endswith('.log'): pretty_printer(put_text = msg_log %filename, where = where, to_exit = True) if lenopt > 2: pretty_printer(put_text = msg_long, where = where, to_exit = True) if (any(opt in ['FILESTDOUT', 'STDOUTOFF'] for opt in optionlog)): if lenopt == 1: # add default logfile. optionlog.append(defaultlog) elif lenopt == 2: # check directory path. 
checkdir(optionlog[1]) else: if lenopt == 2: pretty_printer(put_text = msg_long, where = where, to_exit = True) elif lenopt == 1 and (any(opt not in ['STDOUT', 'FILEOFF'] for opt in optionlog)): # check directory path. checkdir(optionlog[0]) return optionlog #------------------------------------------------------------------------------------------------------------------------------------------------------------ # Valid language identifiers to be used in the EPID (see "kms.c" in vlmcsd) ValidLcid = [1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1065, 1066, 1067, 1068, 1069, 1071, 1074, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1086, 1087, 1088, 1089, 1091, 1092, 1093, 1094, 1095, 1097, 1098, 1099, 1100, 1102, 1103, 1104, 1106, 1110, 1111, 1114, 1125, 1131, 1153, 2049, 2052, 2055, 2057, 2058, 2060, 2064, 2067, 2068, 2070, 2074, 2077, 2092, 2107, 2110, 2115, 2155, 3073, 3076, 3079, 3081, 3082, 3084, 3098, 3131, 3179, 4097, 4100, 4103, 4105, 4106, 4108, 4122, 4155, 5121, 5124, 5127, 5129, 5130, 5132, 5146, 5179, 6145, 6153, 6154, 6156, 6170, 6203, 7169, 7177, 7178, 7194, 7227, 8193, 8201, 8202, 8251, 9217, 9225, 9226, 9275, 10241, 10249, 10250, 11265, 11273, 11274, 12289, 12297, 12298, 13313, 13321, 13322, 14337, 14346, 15361, 15370, 16385, 16394, 17418, 18442, 19466, 20490] # http://stackoverflow.com/questions/3425294/how-to-detect-the-os-default-language-in-python def check_lcid(lcid, log_obj): if not lcid or (lcid not in ValidLcid): if hasattr(sys, 'implementation') and sys.implementation.name == 'cpython': fixlcid = 1033 elif os.name == 'nt': import ctypes fixlcid = ctypes.windll.kernel32.GetUserDefaultUILanguage() else: import locale try: fixlcid = next(k for k, v in locale.windows_locale.items() if v == locale.getdefaultlocale()[0]) except StopIteration: fixlcid = 1033 
                pretty_printer(log_obj = log_obj, put_text = "{reverse}{yellow}{bold}LCID '%s' auto-fixed with LCID '%s'{end}" %(lcid, fixlcid))
                return fixlcid
        return lcid

#------------------------------------------------------------------------------------------------------------------------------------------------------------
class KmsParserException(Exception):
        # Raised by KmsParser.error() instead of argparse's default
        # print-usage-and-exit behavior, so callers can catch parse failures.
        pass

class KmsParser(argparse.ArgumentParser):
        def error(self, message):
                # Override ArgumentParser.error() (which normally prints usage
                # and raises SystemExit) to raise a catchable exception instead.
                raise KmsParserException(message)

class KmsParserHelp(object):
        # Pretty-prints the combined help text of one or three related parsers.

        def replace(self, parser, replace_epilog_with):
                """ Returns the parser's formatted help as a list of lines,
                    dropping the description line and substituting the epilog
                    line with `replace_epilog_with`.
                """
                text = parser.format_help().splitlines()
                help_list = []
                for line in text:
                        if line == parser.description:
                                continue
                        if line == parser.epilog:
                                line = replace_epilog_with
                        help_list.append(line)
                return help_list

        def printer(self, parsers):
                """ Prints help for `parsers` (a sequence of either one or
                    three parser objects) and finishes via parser_base.exit().
                """
                if len(parsers) == 3:
                        parser_base, parser_adj, parser_sub = parsers
                        replace_epilog_with = 80 * '*' + '\n'
                elif len(parsers) == 1:
                        parser_base = parsers[0]
                        replace_epilog_with = ''

                print('\n' + parser_base.description)
                print(len(parser_base.description) * '-' + '\n')
                for line in self.replace(parser_base, replace_epilog_with):
                        print(line)
                try:
                        # parser_adj / parser_sub are only bound in the
                        # three-parser case; the bare except deliberately skips
                        # this section when they are unbound (single-parser call).
                        print(parser_adj.description + '\n')
                        for line in self.replace(parser_sub, replace_epilog_with):
                                print(line)
                except:
                        pass
                print('\n' + len(parser_base.epilog) * '-')
                print(parser_base.epilog + '\n')
                parser_base.exit()

def kms_parser_get(parser):
        """ Splits the parser's optional arguments into two lists of
            option-string groups: flags that take no value, and options
            storing exactly one value. Relies on argparse internals
            (`_actions`, `_StoreAction`); skips positionals and `-h/--help`.

            Returns (zeroarg, onearg).
        """
        zeroarg, onearg = ([] for _ in range(2))
        act = vars(parser)['_actions']
        for i in range(len(act)):
                if act[i].option_strings not in ([], ['-h', '--help']):
                        if isinstance(act[i], argparse._StoreAction):
                                onearg.append(act[i].option_strings)
                        else:
                                zeroarg.append(act[i].option_strings)
        return zeroarg, onearg

def kms_parser_check_optionals(userarg, zeroarg, onearg, msg = 'optional py-kms server', exclude_opt_len = []):
        """ For optionals arguments:
                        Don't allow duplicates,
                        Don't allow abbreviations,
                        Don't allow joining and not existing arguments,
                        Checks length values passed to arguments.
        """
        # Flatten the per-option groups into flat lists of option strings.
        zeroarg = [item for sublist in zeroarg for item in sublist]
        onearg = [item for sublist in onearg for item in sublist]
        allarg = zeroarg + onearg

        def is_abbrev(allarg, arg_to_check):
                # Raises when `arg_to_check` is a proper prefix (an
                # abbreviation) of a known long option that shares its first
                # character after the '--'; returns False otherwise.
                for opt in allarg:
                        if len(opt) > 2 and opt[2] == arg_to_check[2]:
                                for indx in range(-1, -len(opt), -1):
                                        if opt[:indx] == arg_to_check:
                                                raise KmsParserException("%s argument `%s` abbreviation not allowed for `%s`" %(msg, arg_to_check, opt))
                return False

        # Check abbreviations, joining, not existing.
        for arg in userarg:
                if arg not in allarg:
                        if arg.startswith('-'):
                                if arg == '--' or arg[:2] != '--' or not is_abbrev(allarg, arg):
                                        raise KmsParserException("unrecognized %s arguments: `%s`" %(msg, arg))

        # Check duplicates.
        founds = [i for i in userarg if i in allarg]
        dup = [item for item in set(founds) if founds.count(item) > 1]
        if dup != []:
                raise KmsParserException("%s argument `%s` appears several times" %(msg, ', '.join(dup)))

        # Check length.
        elem = None
        for found in founds:
                if found not in exclude_opt_len:
                        pos = userarg.index(found)
                        try:
                                # The token following a flag (or following an
                                # option's single value) must itself be a known
                                # option, otherwise it is an unexpected extra value.
                                if found in zeroarg:
                                        elem = userarg[pos + 1]
                                        num = "zero arguments,"
                                elif found in onearg:
                                        elem = userarg[pos + 2]
                                        num = "one argument,"
                        except IndexError:
                                # Option is the last token: nothing trailing to check.
                                pass
                        if elem and elem not in allarg:
                                raise KmsParserException("%s argument `" %msg + found + "`:" + " expected " + num + " unrecognized: '%s'" %elem)

def kms_parser_check_positionals(config, parse_method, arguments = None, msg = 'positional py-kms server'):
        """ Invokes `parse_method` (on `arguments` when given) and merges the
            parsed namespace into `config`; re-raises argument errors as-is
            and rewraps anything else into a uniform 'unrecognized ...
            arguments' KmsParserException.
        """
        try:
                if arguments:
                        config.update(vars(parse_method(arguments)))
                else:
                        config.update(vars(parse_method()))
        except KmsParserException as e:
                e = str(e)
                if e.startswith('argument'):
                        raise
                else:
                        raise KmsParserException("unrecognized %s arguments: '%s'" %(msg, e.split(': ')[1]))

#------------------------------------------------------------------------------------------------------------------------------------------------------------
def proper_none(dictionary):
        # In-place normalization: replace the string 'None' with the real
        # None for every value of `dictionary`.
        for key in dictionary.keys():
                dictionary[key] = None if dictionary[key] == 'None' else
dictionary[key]

def check_setup(config, options, logger, where):
        """ Normalizes and validates the shared py-kms configuration mapping
            (`where` selects 'srv' or 'clt' behavior), sets the ShellMessage
            visibility/async flags, creates the logger, then validates the
            port. Invalid values are reported via pretty_printer with
            to_exit = True.
        """
        # 'None'--> None.
        proper_none(config)
        # Check logfile.
        config['logfile'] = check_logfile(config['logfile'], options['lfile']['def'], where = where)
        # Check logsize (py-kms Gui).
        if config['logsize'] == "":
                if any(opt in ['STDOUT', 'FILEOFF'] for opt in config['logfile']):
                        # set a recognized size never used.
                        config['logsize'] = 0
                else:
                        pretty_printer(put_text = "{reverse}{red}{bold}argument `-S/--logsize`: invalid with: '%s'. Exiting...{end}" %config['logsize'], where = where, to_exit = True)
        # Check loglevel (py-kms Gui).
        if config['loglevel'] == "":
                # set a recognized level never used.
                config['loglevel'] = 'ERROR'

        # Setup hidden / asynchronous messages.
        hidden = ['STDOUT', 'FILESTDOUT', 'STDOUTOFF']
        view_flag = (False if any(opt in hidden for opt in config['logfile']) else True)
        if where == 'srv':
                ShellMessage.viewsrv = view_flag
                ShellMessage.asyncmsgsrv = config['asyncmsg']
        elif where == 'clt':
                ShellMessage.viewclt = view_flag
                ShellMessage.asyncmsgclt = config['asyncmsg']

        # Create log.
        logger_create(logger, config, mode = 'a')

        # Check port.
        if (config['port'] == "") or (not 1 <= config['port'] <= 65535):
                pretty_printer(log_obj = logger.error, where = where, to_exit = True, put_text = "{reverse}{red}{bold}Port number '%s' is invalid. Enter between 1 - 65535. 
Exiting...{end}" %config['port']) #------------------------------------------------------------------------------------------------------------------------------------------------------------ # http://joshpoley.blogspot.com/2011/09/hresults-user-0x004.html (slerror.h) ErrorCodes = { 'SL_E_SRV_INVALID_PUBLISH_LICENSE' : (0xC004B001, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_INVALID_PRODUCT_KEY_LICENSE' : (0xC004B002, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_INVALID_RIGHTS_ACCOUNT_LICENSE' : (0xC004B003, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_INVALID_LICENSE_STRUCTURE' : (0xC004B004, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_AUTHORIZATION_FAILED' : (0xC004B005, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_INVALID_BINDING' : (0xC004B006, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_SERVER_PONG' : (0xC004B007, 'The activation server reported that the computer could not connect to the activation server.'), 'SL_E_SRV_INVALID_PAYLOAD' : (0xC004B008, 'The activation server determined that the product could not be activated.'), 'SL_E_SRV_INVALID_SECURITY_PROCESSOR_LICENSE' : (0xC004B009, 'The activation server determined that the license is invalid.'), 'SL_E_SRV_BUSINESS_TOKEN_ENTRY_NOT_FOUND' : (0xC004B010, 'The activation server determined that required business token entry cannot be found.'), 'SL_E_SRV_CLIENT_CLOCK_OUT_OF_SYNC' : (0xC004B011, 'The activation server determined that your computer clock time is not correct. 
You must correct your clock before you can activate.'), 'SL_E_SRV_GENERAL_ERROR' : (0xC004B100, 'The activation server determined that the product could not be activated.'), 'SL_E_CHPA_PRODUCT_KEY_OUT_OF_RANGE' : (0xC004C001, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_INVALID_BINDING' : (0xC004C002, 'The activation server determined there is a problem with the specified product key.'), 'SL_E_CHPA_PRODUCT_KEY_BLOCKED' : (0xC004C003, 'The activation server determined the specified product key has been blocked.'), 'SL_E_CHPA_INVALID_PRODUCT_KEY' : (0xC004C004, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_BINDING_NOT_FOUND' : (0xC004C005, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_BINDING_MAPPING_NOT_FOUND' : (0xC004C006, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_UNSUPPORTED_PRODUCT_KEY' : (0xC004C007, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_MAXIMUM_UNLOCK_EXCEEDED' : (0xC004C008, 'The activation server reported that the product key has exceeded its unlock limit.'), 'SL_E_CHPA_ACTCONFIG_ID_NOT_FOUND' : (0xC004C009, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_INVALID_PRODUCT_DATA_ID' : (0xC004C00A, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_INVALID_PRODUCT_DATA' : (0xC004C00B, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_SYSTEM_ERROR' : (0xC004C00C, 'The activation server experienced an error.'), 'SL_E_CHPA_INVALID_ACTCONFIG_ID' : (0xC004C00D, 'The activation server determined the product key is not valid.'), 'SL_E_CHPA_INVALID_PRODUCT_KEY_LENGTH' : (0xC004C00E, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_INVALID_PRODUCT_KEY_FORMAT' : (0xC004C00F, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_INVALID_PRODUCT_KEY_CHAR' : 
(0xC004C010, 'The activation server determined the specified product key is invalid.'), 'SL_E_CHPA_INVALID_BINDING_URI' : (0xC004C011, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_NETWORK_ERROR' : (0xC004C012, 'The activation server experienced a network error.'), 'SL_E_CHPA_DATABASE_ERROR' : (0xC004C013, 'The activation server experienced an error.'), 'SL_E_CHPA_INVALID_ARGUMENT' : (0xC004C014, 'The activation server experienced an error.'), 'SL_E_CHPA_RESPONSE_NOT_AVAILABLE' : (0xC004C015, 'The activation server experienced an error.'), 'SL_E_CHPA_OEM_SLP_COA0' : (0xC004C016, 'The activation server reported that the specified product key cannot be used for online activation.'), 'SL_E_CHPA_PRODUCT_KEY_BLOCKED_IPLOCATION' : (0xC004C017, 'The activation server determined the specified product key has been blocked for this geographic location.'), 'SL_E_CHPA_DMAK_LIMIT_EXCEEDED' : (0xC004C020, 'The activation server reported that the Multiple Activation Key has exceeded its limit.'), 'SL_E_CHPA_DMAK_EXTENSION_LIMIT_EXCEEDED' : (0xC004C021, 'The activation server reported that the Multiple Activation Key extension limit has been exceeded.'), 'SL_E_CHPA_REISSUANCE_LIMIT_NOT_FOUND' : (0xC004C022, 'The activation server reported that the re-issuance limit was not found.'), 'SL_E_CHPA_OVERRIDE_REQUEST_NOT_FOUND' : (0xC004C023, 'The activation server reported that the override request was not found.'), 'SL_E_CHPA_TIMEBASED_ACTIVATION_BEFORE_START_DATE' : (0xC004C030, 'The activation server reported that time based activation attempted before start date.'), 'SL_E_CHPA_TIMEBASED_ACTIVATION_AFTER_END_DATE' : (0xC004C031, 'The activation server reported that time based activation attempted after end date.'), 'SL_E_CHPA_TIMEBASED_ACTIVATION_NOT_AVAILABLE' : (0xC004C032, 'The activation server reported that new time based activation is not available.'), 'SL_E_CHPA_TIMEBASED_PRODUCT_KEY_NOT_CONFIGURED' : (0xC004C033, 'The activation server reported that 
the time based product key is not configured for activation.'), 'SL_E_CHPA_NO_RULES_TO_ACTIVATE' : (0xC004C04F, 'The activation server reported that no business rules available to activate specified product key.'), 'SL_E_CHPA_GENERAL_ERROR' : (0xC004C050, 'The activation server experienced a general error.'), 'SL_E_CHPA_DIGITALMARKER_INVALID_BINDING' : (0xC004C051, 'The activation server determined the license is invalid.'), 'SL_E_CHPA_DIGITALMARKER_BINDING_NOT_CONFIGURED' : (0xC004C052, 'The activation server determined there is a problem with the specified product key.'), 'SL_E_CHPA_DYNAMICALLY_BLOCKED_PRODUCT_KEY' : (0xC004C060, 'The activation server determined the specified product key has been blocked.'), 'SL_E_INVALID_LICENSE_STATE_BREACH_GRACE' : (0xC004C291, 'Genuine Validation determined the license state is invalid.'), 'SL_E_INVALID_LICENSE_STATE_BREACH_GRACE_EXPIRED' : (0xC004C292, 'Genuine Validation determined the license state is invalid.'), 'SL_E_INVALID_TEMPLATE_ID' : (0xC004C2F6, 'Genuine Validation determined the validation input template identifier is invalid.'), 'SL_E_INVALID_XML_BLOB' : (0xC004C2FA, 'Genuine Validation determined the validation input data blob is invalid.'), 'SL_E_VALIDATION_BLOB_PARAM_NOT_FOUND' : (0xC004C327, 'Genuine Validation determined the validation input data blob parameter is invalid.'), 'SL_E_INVALID_CLIENT_TOKEN' : (0xC004C328, 'Genuine Validation determined the client token data is invalid.'), 'SL_E_INVALID_OFFLINE_BLOB' : (0xC004C329, 'Genuine Validation determined the offline data blob is invalid.'), 'SL_E_OFFLINE_VALIDATION_BLOB_PARAM_NOT_FOUND' : (0xC004C32A, 'Genuine Validation determined the offline data blob parameter is invalid.'), 'SL_E_INVALID_OSVERSION_TEMPLATEID' : (0xC004C32B, 'Genuine Validation determined the validation template identifier is invalid for this version of the Windows operating system.'), 'SL_E_OFFLINE_GENUINE_BLOB_REVOKED' : (0xC004C32C, 'Genuine Validation determined the offline 
genuine blob is revoked.'), 'SL_E_OFFLINE_GENUINE_BLOB_NOT_FOUND' : (0xC004C32D, 'Genuine Validation determined the offline genuine blob is not found.'), 'SL_E_CHPA_MSCH_RESPONSE_NOT_AVAILABLE_VGA' : (0xC004C3FF, 'The activation server determined the VGA service response is not available in the expected format.'), 'SL_E_INVALID_OS_FOR_PRODUCT_KEY' : (0xC004C401, 'Genuine Validation determined the product key is invalid for this version of the Windows operating system.'), 'SL_E_INVALID_FILE_HASH' : (0xC004C4A1, 'Genuine Validation determined the file hash is invalid.'), 'SL_E_VALIDATION_BLOCKED_PRODUCT_KEY' : (0xC004C4A2, 'Genuine Validation determined the product key has been blocked.'), 'SL_E_MISMATCHED_KEY_TYPES' : (0xC004C4A4, 'Genuine Validation determined the product key type is invalid.'), 'SL_E_VALIDATION_INVALID_PRODUCT_KEY' : (0xC004C4A5, 'Genuine Validation determined the product key is invalid.'), 'SL_E_INVALID_OEM_OR_VOLUME_BINDING_DATA' : (0xC004C4A7, 'Genuine Validation determined the OEM or Volume binding data is invalid.'), 'SL_E_INVALID_LICENSE_STATE' : (0xC004C4A8, 'Genuine Validation determined the license state is invalid.'), 'SL_E_IP_LOCATION_FALIED' : (0xC004C4A9, 'Genuine Validation determined the specified product key has been blocked for this geographic location.'), 'SL_E_SOFTMOD_EXPLOIT_DETECTED' : (0xC004C4AB, 'Genuine Validation detected Windows licensing exploits.'), 'SL_E_INVALID_TOKEN_DATA' : (0xC004C4AC, 'Genuine Validation determined the token activation data is invalid.'), 'SL_E_HEALTH_CHECK_FAILED_NEUTRAL_FILES' : (0xC004C4AD, 'Genuine Validation detected tampered Windows binaries.'), 'SL_E_HEALTH_CHECK_FAILED_MUI_FILES' : (0xC004C4AE, 'Genuine Validation detected tampered Windows binaries.'), 'SL_E_INVALID_AD_DATA' : (0xC004C4AF, 'Genuine Validation determined the active directory activation data is invalid.'), 'SL_E_INVALID_RSDP_COUNT' : (0xC004C4B0, 'Genuine Validation detected Windows licensing exploits.'), 
'SL_E_ENGINE_DETECTED_EXPLOIT' : (0xC004C4B1, 'Genuine Validation detected Windows licensing exploits.'), 'SL_E_NOTIFICATION_BREACH_DETECTED' : (0xC004C531, 'Genuine Validation detected Windows licensing exploits.'), 'SL_E_NOTIFICATION_GRACE_EXPIRED' : (0xC004C532, 'Genuine Validation determined the license state is in notification due to expired grace.'), 'SL_E_NOTIFICATION_OTHER_REASONS' : (0xC004C533, 'Genuine Validation determined the license state is in notification.'), 'SL_E_NON_GENUINE_STATUS_LAST' : (0xC004C600, 'Genuine Validation determined your copy of Windows is not genuine.'), 'SL_E_CHPA_BUSINESS_RULE_INPUT_NOT_FOUND' : (0xC004C700, 'The activation server reported that business rule cound not find required input.'), 'SL_E_CHPA_NULL_VALUE_FOR_PROPERTY_NAME_OR_ID' : (0xC004C750, 'The activation server reported that NULL value specified for business property name and Id.'), 'SL_E_CHPA_UNKNOWN_PROPERTY_NAME' : (0xC004C751, 'The activation server reported that property name specifies unknown property.'), 'SL_E_CHPA_UNKNOWN_PROPERTY_ID' : (0xC004C752, 'The activation server reported that property Id specifies unknown property.'), 'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCTKEY_BINDING' : (0xC004C755, 'The activation server reported that it failed to update product key binding.'), 'SL_E_CHPA_FAILED_TO_INSERT_PRODUCTKEY_BINDING' : (0xC004C756, 'The activation server reported that it failed to insert product key binding.'), 'SL_E_CHPA_FAILED_TO_DELETE_PRODUCTKEY_BINDING' : (0xC004C757, 'The activation server reported that it failed to delete product key binding.'), 'SL_E_CHPA_FAILED_TO_PROCESS_PRODUCT_KEY_BINDINGS_XML' : (0xC004C758, 'The activation server reported that it failed to process input XML for product key bindings.'), 'SL_E_CHPA_FAILED_TO_INSERT_PRODUCT_KEY_PROPERTY' : (0xC004C75A, 'The activation server reported that it failed to insert product key property.'), 'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCT_KEY_PROPERTY' : (0xC004C75B, 'The activation server reported 
that it failed to update product key property.'), 'SL_E_CHPA_FAILED_TO_DELETE_PRODUCT_KEY_PROPERTY' : (0xC004C75C, 'The activation server reported that it failed to delete product key property.'), 'SL_E_CHPA_UNKNOWN_PRODUCT_KEY_TYPE' : (0xC004C764, 'The activation server reported that the product key type is unknown.'), 'SL_E_CHPA_PRODUCT_KEY_BEING_USED' : (0xC004C770, 'The activation server reported that the product key type is being used by another user.'), 'SL_E_CHPA_FAILED_TO_INSERT_PRODUCT_KEY_RECORD' : (0xC004C780, 'The activation server reported that it failed to insert product key record.'), 'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCT_KEY_RECORD' : (0xC004C781, 'The activation server reported that it failed to update product key record.'), 'SL_REMAPPING_SP_PUB_API_INVALID_LICENSE' : (0xC004D000, ''), 'SL_REMAPPING_SP_PUB_API_INVALID_ALGORITHM_TYPE' : (0xC004D009, ''), 'SL_REMAPPING_SP_PUB_API_TOO_MANY_LOADED_ENVIRONMENTS' : (0xC004D00C, ''), 'SL_REMAPPING_SP_PUB_API_BAD_GET_INFO_QUERY' : (0xC004D012, ''), 'SL_REMAPPING_SP_PUB_API_INVALID_KEY_LENGTH' : (0xC004D055, ''), 'SL_REMAPPING_SP_PUB_API_NO_AES_PROVIDER' : (0xC004D073, ''), 'SL_REMAPPING_SP_PUB_API_HANDLE_NOT_COMMITED' : (0xC004D081, 'The handle was used before calling SPCommit with it.'), 'SL_REMAPPING_SP_PUB_GENERAL_NOT_INITIALIZED' : (0xC004D101, 'The security processor reported an initialization error.'), 'SL_REMAPPING_SP_STATUS_SYSTEM_TIME_SKEWED' : (0x8004D102, 'The security processor reported that the machine time is inconsistent with the trusted time.'), 'SL_REMAPPING_SP_STATUS_GENERIC_FAILURE' : (0xC004D103, 'The security processor reported that an error has occurred.'), 'SL_REMAPPING_SP_STATUS_INVALIDARG' : (0xC004D104, 'The security processor reported that invalid data was used.'), 'SL_REMAPPING_SP_STATUS_ALREADY_EXISTS' : (0xC004D105, 'The security processor reported that the value already exists.'), 'SL_REMAPPING_SP_STATUS_INSUFFICIENT_BUFFER' : (0xC004D107, 'The security processor reported that 
an insufficient buffer was used.'), 'SL_REMAPPING_SP_STATUS_INVALIDDATA' : (0xC004D108, 'The security processor reported that invalid data was used.'), 'SL_REMAPPING_SP_STATUS_INVALID_SPAPI_CALL' : (0xC004D109, 'The security processor reported that an invalid call was made.'), 'SL_REMAPPING_SP_STATUS_INVALID_SPAPI_VERSION' : (0xC004D10A, 'The security processor reported a version mismatch error.'), 'SL_REMAPPING_SP_STATUS_DEBUGGER_DETECTED' : (0x8004D10B, 'The security processor cannot operate while a debugger is attached.'), 'SL_REMAPPING_SP_STATUS_NO_MORE_DATA' : (0xC004D10C, 'No more data is available.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_KEYLENGTH' : (0xC004D201, 'The length of the cryptopgraphic key material/blob is invalid.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_BLOCKLENGTH' : (0xC004D202, 'The block length is not correct for this algorithm.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_CIPHER' : (0xC004D203, 'The Cryptopgrahic cipher/algorithm type is invalid.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_CIPHERMODE' : (0xC004D204, 'The specified cipher mode is invalid. 
For example both encrypt and decrypt cannot be specified for symmetric keys.'), 'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_PROVIDERID' : (0xC004D205, 'The SPAPIID for the specified Cryptographic Provider is unknown.'), 'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_KEYID' : (0xC004D206, 'The SPAPIID for the specified Cryptographic Key (type) is unknown.'), 'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_HASHID' : (0xC004D207, 'The SPAPIID for the specified Cryptographic Hash is unknown.'), 'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_ATTRIBUTEID' : (0xC004D208, 'The SPAPIID for the specified Cryptographic Attribute is unknown.'), 'SL_REMAPPING_SP_PUB_CRYPTO_HASH_FINALIZED' : (0xC004D209, 'The hash object has been finalized and can no longer be updated.'), 'SL_REMAPPING_SP_PUB_CRYPTO_KEY_NOT_AVAILABLE' : (0xC004D20A, 'The key is not available within the current state.'), 'SL_REMAPPING_SP_PUB_CRYPTO_KEY_NOT_FOUND' : (0xC004D20B, 'The key does not exist. It may not have have been created yet.'), 'SL_REMAPPING_SP_PUB_CRYPTO_NOT_BLOCK_ALIGNED' : (0xC004D20C, "The data length is not a multiple of the algorithm's block length."), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_SIGNATURELENGTH' : (0xC004D20D, 'The length of the signature is not valid.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_SIGNATURE' : (0xC004D20E, 'The signature does not correlate with the comparison hash.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_BLOCK' : (0xC004D20F, 'The RSA block is not valid.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_FORMAT' : (0xC004D210, 'The format of the RSA block is not valid.'), 'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_PADDING' : (0xC004D211, 'The CBC padding is not valid.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED' : (0xC004D301, 'The security processor reported that the trusted data store was tampered.'), 'SL_REMAPPING_SP_PUB_TS_REARMED' : (0xC004D302, 'The security processor reported that the trusted data store was rearmed.'), 'SL_REMAPPING_SP_PUB_TS_RECREATED' : (0xC004D303, 'The security processor reported that the trusted store has been 
recreated.'), 'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_NOT_FOUND' : (0xC004D304, 'The security processor reported that entry key was not found in the trusted data store.'), 'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_ALREADY_EXISTS' : (0xC004D305, 'The security processor reported that the entry key already exists in the trusted data store.'), 'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_SIZE_TOO_BIG' : (0xC004D306, 'The security processor reported that the entry key is too big to fit in the trusted data store.'), 'SL_REMAPPING_SP_PUB_TS_MAX_REARM_REACHED' : (0xC004D307, 'The security processor reported that the maximum allowed number of re-arms has been exceeded. You must re-install the OS before trying to re-arm again.'), 'SL_REMAPPING_SP_PUB_TS_DATA_SIZE_TOO_BIG' : (0xC004D308, 'The security processor has reported that entry data size is too big to fit in the trusted data store.'), 'SL_REMAPPING_SP_PUB_TS_INVALID_HW_BINDING' : (0xC004D309, 'The security processor has reported that the machine has gone out of hardware tolerance.'), 'SL_REMAPPING_SP_PUB_TIMER_ALREADY_EXISTS' : (0xC004D30A, 'The security processor has reported that the secure timer already exists.'), 'SL_REMAPPING_SP_PUB_TIMER_NOT_FOUND' : (0xC004D30B, 'The security processor has reported that the secure timer was not found.'), 'SL_REMAPPING_SP_PUB_TIMER_EXPIRED' : (0xC004D30C, 'The security processor has reported that the secure timer has expired.'), 'SL_REMAPPING_SP_PUB_TIMER_NAME_SIZE_TOO_BIG' : (0xC004D30D, 'The security processor has reported that the secure timer name is too long.'), 'SL_REMAPPING_SP_PUB_TS_FULL' : (0xC004D30E, 'The security processor reported that the trusted data store is full.'), 'SL_REMAPPING_SP_PUB_TRUSTED_TIME_OK' : (0x4004D30F, 'Trusted time is already up-to-date.'), 'SL_REMAPPING_SP_PUB_TS_ENTRY_READ_ONLY' : (0xC004D310, 'Read-only entry cannot be modified.'), 'SL_REMAPPING_SP_PUB_TIMER_READ_ONLY' : (0xC004D311, 'Read-only timer cannot be modified.'), 'SL_REMAPPING_SP_PUB_TS_ATTRIBUTE_READ_ONLY' 
: (0xC004D312, 'Read-only attribute cannot be modified.'), 'SL_REMAPPING_SP_PUB_TS_ATTRIBUTE_NOT_FOUND' : (0xC004D313, 'Attribute not found.'), 'SL_REMAPPING_SP_PUB_TS_ACCESS_DENIED' : (0xC004D314, 'Trusted Store access denied.'), 'SL_REMAPPING_SP_PUB_TS_NAMESPACE_NOT_FOUND' : (0xC004D315, 'Namespace not found.'), 'SL_REMAPPING_SP_PUB_TS_NAMESPACE_IN_USE' : (0xC004D316, 'Namespace in use.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_BREADCRUMB_LOAD_INVALID' : (0xC004D317, 'Trusted store tampered.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_BREADCRUMB_GENERATION' : (0xC004D318, 'Trusted store tampered.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_INVALID_DATA' : (0xC004D319, 'Trusted store tampered.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_NO_DATA' : (0xC004D31A, 'Trusted store tampered.'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_DATA_BREADCRUMB_MISMATCH' : (0xC004D31B, 'Trusted store tampered'), 'SL_REMAPPING_SP_PUB_TS_TAMPERED_DATA_VERSION_MISMATCH' : (0xC004D31C, 'Trusted store tampered.'), 'SL_REMAPPING_SP_PUB_TAMPER_MODULE_AUTHENTICATION' : (0xC004D401, 'The security processor reported a system file mismatch error.'), 'SL_REMAPPING_SP_PUB_TAMPER_SECURITY_PROCESSOR_PATCHED' : (0xC004D402, 'The security processor reported a system file mismatch error.'), 'SL_REMAPPING_SP_PUB_KM_CACHE_TAMPER' : (0xC004D501, 'The security processor reported an error with the kernel data.'), 'SL_REMAPPING_SP_PUB_KM_CACHE_TAMPER_RESTORE_FAILED' : (0xC004D502, 'Kernel Mode Cache is tampered and the restore attempt failed.'), 'SL_REMAPPING_SP_PUB_KM_CACHE_IDENTICAL' : (0x4004D601, 'Kernel Mode Cache was not changed.'), 'SL_REMAPPING_SP_PUB_KM_CACHE_POLICY_CHANGED' : (0x4004D602, 'Reboot-requiring policies have changed.'), 'SL_REMAPPING_SP_STATUS_PUSHKEY_CONFLICT' : (0xC004D701, 'External decryption key was already set for specified feature.'), 'SL_REMAPPING_SP_PUB_PROXY_SOFT_TAMPER' : (0xC004D702, 'Error occured during proxy execution'), 'SL_E_INVALID_CONTEXT' : (0xC004E001, 'The Software Licensing Service determined that 
the specified context is invalid.'), 'SL_E_TOKEN_STORE_INVALID_STATE' : (0xC004E002, 'The Software Licensing Service reported that the license store contains inconsistent data.'), 'SL_E_EVALUATION_FAILED' : (0xC004E003, 'The Software Licensing Service reported that license evaluation failed.'), 'SL_E_NOT_EVALUATED' : (0xC004E004, 'The Software Licensing Service reported that the license has not been evaluated.'), 'SL_E_NOT_ACTIVATED' : (0xC004E005, 'The Software Licensing Service reported that the license is not activated.'), 'SL_E_INVALID_GUID' : (0xC004E006, 'The Software Licensing Service reported that the license contains invalid data.'), 'SL_E_TOKSTO_TOKEN_NOT_FOUND' : (0xC004E007, 'The Software Licensing Service reported that the license store does not contain the requested license.'), 'SL_E_TOKSTO_NO_PROPERTIES' : (0xC004E008, 'The Software Licensing Service reported that the license property is invalid.'), 'SL_E_TOKSTO_NOT_INITIALIZED' : (0xC004E009, 'The Software Licensing Service reported that the license store is not initialized.'), 'SL_E_TOKSTO_ALREADY_INITIALIZED' : (0xC004E00A, 'The Software Licensing Service reported that the license store is already initialized.'), 'SL_E_TOKSTO_NO_ID_SET' : (0xC004E00B, 'The Software Licensing Service reported that the license property is invalid.'), 'SL_E_TOKSTO_CANT_CREATE_FILE' : (0xC004E00C, 'The Software Licensing Service reported that the license could not be opened or created.'), 'SL_E_TOKSTO_CANT_WRITE_TO_FILE' : (0xC004E00D, 'The Software Licensing Service reported that the license could not be written.'), 'SL_E_TOKSTO_CANT_READ_FILE' : (0xC004E00E, 'The Software Licensing Service reported that the license store could not read the license file.'), 'SL_E_TOKSTO_CANT_PARSE_PROPERTIES' : (0xC004E00F, 'The Software Licensing Service reported that the license property is corrupted.'), 'SL_E_TOKSTO_PROPERTY_NOT_FOUND' : (0xC004E010, 'The Software Licensing Service reported that the license property is missing.'), 
'SL_E_TOKSTO_INVALID_FILE' : (0xC004E011, 'The Software Licensing Service reported that the license store contains an invalid license file.'), 'SL_E_TOKSTO_CANT_CREATE_MUTEX' : (0xC004E012, 'The Software Licensing Service reported that the license store failed to start synchronization properly.'), 'SL_E_TOKSTO_CANT_ACQUIRE_MUTEX' : (0xC004E013, 'The Software Licensing Service reported that the license store failed to synchronize properly.'), 'SL_E_TOKSTO_NO_TOKEN_DATA' : (0xC004E014, 'The Software Licensing Service reported that the license property is invalid.'), 'SL_E_EUL_CONSUMPTION_FAILED' : (0xC004E015, 'The Software Licensing Service reported that license consumption failed.'), 'SL_E_PKEY_INVALID_CONFIG' : (0xC004E016, 'The Software Licensing Service reported that the product key is invalid.'), 'SL_E_PKEY_INVALID_UNIQUEID' : (0xC004E017, 'The Software Licensing Service reported that the product key is invalid.'), 'SL_E_PKEY_INVALID_ALGORITHM' : (0xC004E018, 'The Software Licensing Service reported that the product key is invalid.'), 'SL_E_PKEY_INTERNAL_ERROR' : (0xC004E019, 'The Software Licensing Service determined that validation of the specified product key failed.'), 'SL_E_LICENSE_INVALID_ADDON_INFO' : (0xC004E01A, 'The Software Licensing Service reported that invalid add-on information was found.'), 'SL_E_HWID_ERROR' : (0xC004E01B, 'The Software Licensing Service reported that not all hardware information could be collected.'), 'SL_E_PKEY_INVALID_KEYCHANGE1' : (0xC004E01C, 'This evaluation product key is no longer valid.'), 'SL_E_PKEY_INVALID_KEYCHANGE2' : (0xC004E01D, 'The new product key cannot be used on this installation of Windows. Type a different product key. (CD-AB)'), 'SL_E_PKEY_INVALID_KEYCHANGE3' : (0xC004E01E, 'The new product key cannot be used on this installation of Windows. Type a different product key. 
(AB-AB)'), 'SL_E_POLICY_OTHERINFO_MISMATCH' : (0xC004E020, 'The Software Licensing Service reported that there is a mismatched between a policy value and information stored in the OtherInfo section.'), 'SL_E_PRODUCT_UNIQUENESS_GROUP_ID_INVALID' : (0xC004E021, 'The Software Licensing Service reported that the Genuine information contained in the license is not consistent.'), 'SL_E_SECURE_STORE_ID_MISMATCH' : (0xC004E022, 'The Software Licensing Service reported that the secure store id value in license does not match with the current value.'), 'SL_E_INVALID_RULESET_RULE' : (0xC004E023, 'The Software Licensing Service reported that the notification rules appear to be invalid.'), 'SL_E_INVALID_CONTEXT_DATA' : (0xC004E024, 'The Software Licensing Service reported that the reported machine data appears to be invalid.'), 'SL_E_INVALID_HASH' : (0xC004E025, 'The Software Licensing Service reported that the data hash does not correspond to the data.'), 'SL_E_INVALID_USE_OF_ADD_ON_PKEY' : (0x8004E026, 'The Software Licensing Service reported that a valid product key for an add-on sku was entered where a Windows product key was expected.'), 'SL_E_WINDOWS_VERSION_MISMATCH' : (0xC004E027, 'The Software Licensing Service reported that the version of SPPSvc does not match the policy.'), 'SL_E_ACTIVATION_IN_PROGRESS' : (0xC004E028, 'The Software Licensing Service reported that there is another activation attempt in progress for this sku. Please wait for that attempt to complete before trying again.'), 'SL_E_STORE_UPGRADE_TOKEN_REQUIRED' : (0xC004E029, 'The Software Licensing Service reported that the activated license requires a corresponding Store upgrade license in order to work. Please visit the Store to purchase a new license or re-download an existing one.'), 'SL_E_STORE_UPGRADE_TOKEN_WRONG_EDITION' : (0xC004E02A, 'The Software Licensing Service reported that the Store upgrade license is not enabled for the current OS edition. 
Please visit the Store to purchase the appropriate license.'), 'SL_E_STORE_UPGRADE_TOKEN_WRONG_PID' : (0xC004E02B, 'The Software Licensing Service reported that the Store upgrade license does not match the current active product key. Please visit the Store to purchase a new license or re-download an existing one.'), 'SL_E_STORE_UPGRADE_TOKEN_NOT_PRS_SIGNED' : (0xC004E02C, 'The Software Licensing Service reported that the Store upgrade license does not match the current signing level for the installed Operating System. Please visit the Store to purchase a new license or re-download an existing one.'), 'SL_E_STORE_UPGRADE_TOKEN_WRONG_VERSION' : (0xC004E02D, 'The Software Licensing Service reported that the Store upgrade license does not enable the current version of the installed Operating System. Please visit the Store to purchase a new license or re-download an existing one.'), 'SL_E_STORE_UPGRADE_TOKEN_NOT_AUTHORIZED' : (0xC004E02E, 'The Software Licensing Service reported that the Store upgrade license could not be authorized. 
Please visit the Store to purchase a new license or re-download an existing one.'), 'SL_E_SFS_INVALID_FS_VERSION' : (0x8004E101, 'The Software Licensing Service reported that the Token Store file version is invalid.'), 'SL_E_SFS_INVALID_FD_TABLE' : (0x8004E102, 'The Software Licensing Service reported that the Token Store contains an invalid descriptor table.'), 'SL_E_SFS_INVALID_SYNC' : (0x8004E103, 'The Software Licensing Service reported that the Token Store contains a token with an invalid header/footer.'), 'SL_E_SFS_BAD_TOKEN_NAME' : (0x8004E104, 'The Software Licensing Service reported that a Token Store token has an invalid name.'), 'SL_E_SFS_BAD_TOKEN_EXT' : (0x8004E105, 'The Software Licensing Service reported that a Token Store token has an invalid extension.'), 'SL_E_SFS_DUPLICATE_TOKEN_NAME' : (0x8004E106, 'The Software Licensing Service reported that the Token Store contains a duplicate token.'), 'SL_E_SFS_TOKEN_SIZE_MISMATCH' : (0x8004E107, 'The Software Licensing Service reported that a token in the Token Store has a size mismatch.'), 'SL_E_SFS_INVALID_TOKEN_DATA_HASH' : (0x8004E108, 'The Software Licensing Service reported that a token in the Token Store contains an invalid hash.'), 'SL_E_SFS_FILE_READ_ERROR' : (0x8004E109, 'The Software Licensing Service reported that the Token Store was unable to read a token.'), 'SL_E_SFS_FILE_WRITE_ERROR' : (0x8004E10A, 'The Software Licensing Service reported that the Token Store was unable to write a token.'), 'SL_E_SFS_INVALID_FILE_POSITION' : (0x8004E10B, 'The Software Licensing Service reported that the Token Store attempted an invalid file operation.'), 'SL_E_SFS_NO_ACTIVE_TRANSACTION' : (0x8004E10C, 'The Software Licensing Service reported that there is no active transaction.'), 'SL_E_SFS_INVALID_FS_HEADER' : (0x8004E10D, 'The Software Licensing Service reported that the Token Store file header is invalid.'), 'SL_E_SFS_INVALID_TOKEN_DESCRIPTOR' : (0x8004E10E, 'The Software Licensing Service reported that 
a Token Store token descriptor is invalid.'), 'SL_E_INTERNAL_ERROR' : (0xC004F001, 'The Software Licensing Service reported an internal error.'), 'SL_E_RIGHT_NOT_CONSUMED' : (0xC004F002, 'The Software Licensing Service reported that rights consumption failed.'), 'SL_E_USE_LICENSE_NOT_INSTALLED' : (0xC004F003, 'The Software Licensing Service reported that the required license could not be found.'), 'SL_E_MISMATCHED_PKEY_RANGE' : (0xC004F004, 'The Software Licensing Service reported that the product key does not match the range defined in the license.'), 'SL_E_MISMATCHED_PID' : (0xC004F005, 'The Software Licensing Service reported that the product key does not match the product key for the license.'), 'SL_E_EXTERNAL_SIGNATURE_NOT_FOUND' : (0xC004F006, 'The Software Licensing Service reported that the signature file for the license is not available.'), 'SL_E_RAC_NOT_AVAILABLE' : (0xC004F007, 'The Software Licensing Service reported that the license could not be found.'), 'SL_E_SPC_NOT_AVAILABLE' : (0xC004F008, 'The Software Licensing Service reported that the license could not be found.'), 'SL_E_GRACE_TIME_EXPIRED' : (0xC004F009, 'The Software Licensing Service reported that the grace period expired.'), 'SL_E_MISMATCHED_APPID' : (0xC004F00A, 'The Software Licensing Service reported that the application ID does not match the application ID for the license.'), 'SL_E_NO_PID_CONFIG_DATA' : (0xC004F00B, 'The Software Licensing Service reported that the product identification data is not available.'), 'SL_I_OOB_GRACE_PERIOD' : (0x4004F00C, 'The Software Licensing Service reported that the application is running within the valid grace period.'), 'SL_I_OOT_GRACE_PERIOD' : (0x4004F00D, 'The Software Licensing Service reported that the application is running within the valid out of tolerance grace period.'), 'SL_E_MISMATCHED_SECURITY_PROCESSOR' : (0xC004F00E, 'The Software Licensing Service determined that the license could not be used by the current version of the security 
processor component.'), 'SL_E_OUT_OF_TOLERANCE' : (0xC004F00F, 'The Software Licensing Service reported that the hardware ID binding is beyond the level of tolerance.'), 'SL_E_INVALID_PKEY' : (0xC004F010, 'The Software Licensing Service reported that the product key is invalid.'), 'SL_E_LICENSE_FILE_NOT_INSTALLED' : (0xC004F011, 'The Software Licensing Service reported that the license file is not installed.'), 'SL_E_VALUE_NOT_FOUND' : (0xC004F012, 'The Software Licensing Service reported that the call has failed because the value for the input key was not found.'), 'SL_E_RIGHT_NOT_GRANTED' : (0xC004F013, 'The Software Licensing Service determined that there is no permission to run the software.'), 'SL_E_PKEY_NOT_INSTALLED' : (0xC004F014, 'The Software Licensing Service reported that the product key is not available.'), 'SL_E_PRODUCT_SKU_NOT_INSTALLED' : (0xC004F015, 'The Software Licensing Service reported that the license is not installed.'), 'SL_E_NOT_SUPPORTED' : (0xC004F016, 'The Software Licensing Service determined that the request is not supported.'), 'SL_E_PUBLISHING_LICENSE_NOT_INSTALLED' : (0xC004F017, 'The Software Licensing Service reported that the license is not installed.'), 'SL_E_LICENSE_SERVER_URL_NOT_FOUND' : (0xC004F018, 'The Software Licensing Service reported that the license does not contain valid location data for the activation server.'), 'SL_E_INVALID_EVENT_ID' : (0xC004F019, 'The Software Licensing Service determined that the requested event ID is invalid.'), 'SL_E_EVENT_NOT_REGISTERED' : (0xC004F01A, 'The Software Licensing Service determined that the requested event is not registered with the service.'), 'SL_E_EVENT_ALREADY_REGISTERED' : (0xC004F01B, 'The Software Licensing Service reported that the event ID is already registered.'), 'SL_E_DECRYPTION_LICENSES_NOT_AVAILABLE' : (0xC004F01C, 'The Software Licensing Service reported that the license is not installed.'), 'SL_E_LICENSE_SIGNATURE_VERIFICATION_FAILED' : (0xC004F01D, 'The 
Software Licensing Service reported that the verification of the license failed.'), 'SL_E_DATATYPE_MISMATCHED' : (0xC004F01E, 'The Software Licensing Service determined that the input data type does not match the data type in the license.'), 'SL_E_INVALID_LICENSE' : (0xC004F01F, 'The Software Licensing Service determined that the license is invalid.'), 'SL_E_INVALID_PACKAGE' : (0xC004F020, 'The Software Licensing Service determined that the license package is invalid.'), 'SL_E_VALIDITY_TIME_EXPIRED' : (0xC004F021, 'The Software Licensing Service reported that the validity period of the license has expired.'), 'SL_E_LICENSE_AUTHORIZATION_FAILED' : (0xC004F022, 'The Software Licensing Service reported that the license authorization failed.'), 'SL_E_LICENSE_DECRYPTION_FAILED' : (0xC004F023, 'The Software Licensing Service reported that the license is invalid.'), 'SL_E_WINDOWS_INVALID_LICENSE_STATE' : (0xC004F024, 'The Software Licensing Service reported that the license is invalid.'), 'SL_E_LUA_ACCESSDENIED' : (0xC004F025, 'The Software Licensing Service reported that the action requires administrator privilege.'), 'SL_E_PROXY_KEY_NOT_FOUND' : (0xC004F026, 'The Software Licensing Service reported that the required data is not found.'), 'SL_E_TAMPER_DETECTED' : (0xC004F027, 'The Software Licensing Service reported that the license is tampered.'), 'SL_E_POLICY_CACHE_INVALID' : (0xC004F028, 'The Software Licensing Service reported that the policy cache is invalid.'), 'SL_E_INVALID_RUNNING_MODE' : (0xC004F029, 'The Software Licensing Service cannot be started in the current OS mode.'), 'SL_E_SLP_NOT_SIGNED' : (0xC004F02A, 'The Software Licensing Service reported that the license is invalid.'), 'SL_E_CIDIID_INVALID_DATA' : (0xC004F02C, 'The Software Licensing Service reported that the format for the offline activation data is incorrect.'), 'SL_E_CIDIID_INVALID_VERSION' : (0xC004F02D, 'The Software Licensing Service determined that the version of the offline Confirmation ID 
(CID) is incorrect.'), 'SL_E_CIDIID_VERSION_NOT_SUPPORTED' : (0xC004F02E, 'The Software Licensing Service determined that the version of the offline Confirmation ID (CID) is not supported.'), 'SL_E_CIDIID_INVALID_DATA_LENGTH' : (0xC004F02F, 'The Software Licensing Service reported that the length of the offline Confirmation ID (CID) is incorrect.'), 'SL_E_CIDIID_NOT_DEPOSITED' : (0xC004F030, 'The Software Licensing Service determined that the Installation ID (IID) or the Confirmation ID (CID) could not been saved.'), 'SL_E_CIDIID_MISMATCHED' : (0xC004F031, 'The Installation ID (IID) and the Confirmation ID (CID) do not match. Please confirm the IID and reacquire a new CID if necessary.'), 'SL_E_INVALID_BINDING_BLOB' : (0xC004F032, 'The Software Licensing Service determined that the binding data is invalid.'), 'SL_E_PRODUCT_KEY_INSTALLATION_NOT_ALLOWED' : (0xC004F033, 'The Software Licensing Service reported that the product key is not allowed to be installed. Please see the eventlog for details.'), 'SL_E_EUL_NOT_AVAILABLE' : (0xC004F034, 'The Software Licensing Service reported that the license could not be found or was invalid.'), 'SL_E_VL_NOT_WINDOWS_SLP' : (0xC004F035, 'The Software Licensing Service reported that the computer could not be activated with a Volume license product key. Volume-licensed systems require upgrading from a qualifying operating system. Please contact your system administrator or use a different type of key.'), 'SL_E_VL_NOT_ENOUGH_COUNT' : (0xC004F038, 'The Software Licensing Service reported that the product could not be activated. The count reported by your Key Management Service (KMS) is insufficient. Please contact your system administrator.'), 'SL_E_VL_BINDING_SERVICE_NOT_ENABLED' : (0xC004F039, 'The Software Licensing Service reported that the product could not be activated. 
The Key Management Service (KMS) is not enabled.'), 'SL_E_VL_INFO_PRODUCT_USER_RIGHT' : (0x4004F040, 'The Software Licensing Service reported that the product was activated but the owner should verify the Product Use Rights.'), 'SL_E_VL_KEY_MANAGEMENT_SERVICE_NOT_ACTIVATED' : (0xC004F041, 'The Software Licensing Service determined that the Key Management Service (KMS) is not activated. KMS needs to be activated. Please contact system administrator.'), 'SL_E_VL_KEY_MANAGEMENT_SERVICE_ID_MISMATCH' : (0xC004F042, 'The Software Licensing Service determined that the specified Key Management Service (KMS) cannot be used.'), 'SL_E_PROXY_POLICY_NOT_UPDATED' : (0xC004F047, 'The Software Licensing Service reported that the proxy policy has not been updated.'), 'SL_E_CIDIID_INVALID_CHECK_DIGITS' : (0xC004F04D, 'The Software Licensing Service determined that the Installation ID (IID) or the Confirmation ID (CID) is invalid.'), 'SL_E_LICENSE_MANAGEMENT_DATA_NOT_FOUND' : (0xC004F04F, 'The Software Licensing Service reported that license management information was not found in the licenses.'), 'SL_E_INVALID_PRODUCT_KEY' : (0xC004F050, 'The Software Licensing Service reported that the product key is invalid.'), 'SL_E_BLOCKED_PRODUCT_KEY' : (0xC004F051, 'The Software Licensing Service reported that the product key is blocked.'), 'SL_E_DUPLICATE_POLICY' : (0xC004F052, 'The Software Licensing Service reported that the licenses contain duplicated properties.'), 'SL_E_MISSING_OVERRIDE_ONLY_ATTRIBUTE' : (0xC004F053, 'The Software Licensing Service determined that the license is invalid. 
The license contains an override policy that is not configured properly.'), 'SL_E_LICENSE_MANAGEMENT_DATA_DUPLICATED' : (0xC004F054, 'The Software Licensing Service reported that license management information has duplicated data.'), 'SL_E_BASE_SKU_NOT_AVAILABLE' : (0xC004F055, 'The Software Licensing Service reported that the base SKU is not available.'), 'SL_E_VL_MACHINE_NOT_BOUND' : (0xC004F056, 'The Software Licensing Service reported that the product could not be activated using the Key Management Service (KMS).'), 'SL_E_SLP_MISSING_ACPI_SLIC' : (0xC004F057, 'The Software Licensing Service reported that the computer BIOS is missing a required license.'), 'SL_E_SLP_MISSING_SLP_MARKER' : (0xC004F058, 'The Software Licensing Service reported that the computer BIOS is missing a required license.'), 'SL_E_SLP_BAD_FORMAT' : (0xC004F059, 'The Software Licensing Service reported that a license in the computer BIOS is invalid.'), 'SL_E_INVALID_PACKAGE_VERSION' : (0xC004F060, 'The Software Licensing Service determined that the version of the license package is invalid.'), 'SL_E_PKEY_INVALID_UPGRADE' : (0xC004F061, 'The Software Licensing Service determined that this specified product key can only be used for upgrading, not for clean installations.'), 'SL_E_ISSUANCE_LICENSE_NOT_INSTALLED' : (0xC004F062, 'The Software Licensing Service reported that a required license could not be found.'), 'SL_E_SLP_OEM_CERT_MISSING' : (0xC004F063, 'The Software Licensing Service reported that the computer is missing a required OEM license.'), 'SL_E_NONGENUINE_GRACE_TIME_EXPIRED' : (0xC004F064, 'The Software Licensing Service reported that the non-genuine grace period expired.'), 'SL_I_NONGENUINE_GRACE_PERIOD' : (0x4004F065, 'The Software Licensing Service reported that the application is running within the valid non-genuine grace period.'), 'SL_E_DEPENDENT_PROPERTY_NOT_SET' : (0xC004F066, 'The Software Licensing Service reported that the genuine information property can not be set 
before dependent property been set.'), 'SL_E_NONGENUINE_GRACE_TIME_EXPIRED_2' : (0xC004F067, 'The Software Licensing Service reported that the non-genuine grace period expired (type 2).'), 'SL_I_NONGENUINE_GRACE_PERIOD_2' : (0x4004F068, 'The Software Licensing Service reported that the application is running within the valid non-genuine grace period (type 2).'), 'SL_E_MISMATCHED_PRODUCT_SKU' : (0xC004F069, 'The Software Licensing Service reported that the product SKU is not found.'), 'SL_E_OPERATION_NOT_ALLOWED' : (0xC004F06A, 'The Software Licensing Service reported that the requested operation is not allowed.'), 'SL_E_VL_KEY_MANAGEMENT_SERVICE_VM_NOT_SUPPORTED' : (0xC004F06B, 'The Software Licensing Service determined that it is running in a virtual machine. The Key Management Service (KMS) is not supported in this mode.'), 'SL_E_VL_INVALID_TIMESTAMP' : (0xC004F06C, 'The Software Licensing Service reported that the product could not be activated. The Key Management Service (KMS) determined that the request timestamp is invalid.'), 'SL_E_PLUGIN_INVALID_MANIFEST' : (0xC004F071, 'The Software Licensing Service reported that the plug-in manifest file is incorrect.'), 'SL_E_APPLICATION_POLICIES_MISSING' : (0xC004F072, 'The Software Licensing Service reported that the license policies for fast query could not be found.'), 'SL_E_APPLICATION_POLICIES_NOT_LOADED' : (0xC004F073, 'The Software Licensing Service reported that the license policies for fast query have not been loaded.'), 'SL_E_VL_BINDING_SERVICE_UNAVAILABLE' : (0xC004F074, 'The Software Licensing Service reported that the product could not be activated. No Key Management Service (KMS) could be contacted. 
Please see the Application Event Log for additional information.'), 'SL_E_SERVICE_STOPPING' : (0xC004F075, 'The Software Licensing Service reported that the operation cannot be completed because the service is stopping.'), 'SL_E_PLUGIN_NOT_REGISTERED' : (0xC004F076, 'The Software Licensing Service reported that the requested plug-in cannot be found.'), 'SL_E_AUTHN_WRONG_VERSION' : (0xC004F077, 'The Software Licensing Service determined incompatible version of authentication data.'), 'SL_E_AUTHN_MISMATCHED_KEY' : (0xC004F078, 'The Software Licensing Service reported that the key is mismatched.'), 'SL_E_AUTHN_CHALLENGE_NOT_SET' : (0xC004F079, 'The Software Licensing Service reported that the authentication data is not set.'), 'SL_E_AUTHN_CANT_VERIFY' : (0xC004F07A, 'The Software Licensing Service reported that the verification could not be done.'), 'SL_E_SERVICE_RUNNING' : (0xC004F07B, 'The requested operation is unavailable while the Software Licensing Service is running.'), 'SL_E_SLP_INVALID_MARKER_VERSION' : (0xC004F07C, 'The Software Licensing Service determined that the version of the computer BIOS is invalid.'), 'SL_E_INVALID_PRODUCT_KEY_TYPE' : (0xC004F07D, 'The Software Licensing Service reported that the product key cannot be used for this type of activation.'), 'SL_E_CIDIID_MISMATCHED_PKEY' : (0xC004F07E, 'The Installation ID (IID) and the Confirmation ID (CID) do not match the product key.'), 'SL_E_CIDIID_NOT_BOUND' : (0xC004F07F, 'The Installation ID (IID) and the Confirmation ID (CID) are not bound to the current environment.'), 'SL_E_LICENSE_NOT_BOUND' : (0xC004F080, 'The Software Licensing Service reported that the license is not bound to the current environment.'), 'SL_E_VL_AD_AO_NOT_FOUND' : (0xC004F081, 'The Software Licensing Service reported that the Active Directory Activation Object could not be found or was invalid.'), 'SL_E_VL_AD_AO_NAME_TOO_LONG' : (0xC004F082, 'The Software Licensing Service reported that the name specified for the Active 
Directory Activation Object is too long.'), 'SL_E_VL_AD_SCHEMA_VERSION_NOT_SUPPORTED' : (0xC004F083, 'The Software Licensing Service reported that Active Directory-Based Activation is not supported in the current Active Directory schema.'), 'SL_E_NOT_GENUINE' : (0xC004F200, 'The Software Licensing Service reported that current state is not genuine.'), 'SL_E_EDITION_MISMATCHED' : (0xC004F210, 'The Software Licensing Service reported that the license edition does match the computer edition.'), 'SL_E_TKA_CHALLENGE_EXPIRED' : (0xC004F301, 'The Software Licensing Service reported that the product could not be activated. The token-based activation challenge has expired.'), 'SL_E_TKA_SILENT_ACTIVATION_FAILURE' : (0xC004F302, 'The Software Licensing Service reported that Silent Activation failed. The Software Licensing Service reported that there are no certificates found in the system that could activate the product without user interaction.'), 'SL_E_TKA_INVALID_CERT_CHAIN' : (0xC004F303, 'The Software Licensing Service reported that the certificate chain could not be built or failed validation.'), 'SL_E_TKA_GRANT_NOT_FOUND' : (0xC004F304, 'The Software Licensing Service reported that required license could not be found.'), 'SL_E_TKA_CERT_NOT_FOUND' : (0xC004F305, 'The Software Licensing Service reported that there are no certificates found in the system that could activate the product.'), 'SL_E_TKA_INVALID_SKU_ID' : (0xC004F306, 'The Software Licensing Service reported that this software edition does not support token-based activation.'), 'SL_E_TKA_INVALID_BLOB' : (0xC004F307, 'The Software Licensing Service reported that the product could not be activated. Activation data is invalid.'), 'SL_E_TKA_TAMPERED_CERT_CHAIN' : (0xC004F308, 'The Software Licensing Service reported that the product could not be activated. Activation data is tampered.'), 'SL_E_TKA_CHALLENGE_MISMATCH' : (0xC004F309, 'The Software Licensing Service reported that the product could not be activated. 
Activation challenge and response do not match.'), 'SL_E_TKA_INVALID_CERTIFICATE' : (0xC004F30A, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the conditions in the license.'), 'SL_E_TKA_INVALID_SMARTCARD' : (0xC004F30B, 'The Software Licensing Service reported that the inserted smartcard could not be used to activate the product.'), 'SL_E_TKA_FAILED_GRANT_PARSING' : (0xC004F30C, 'The Software Licensing Service reported that the token-based activation license content is invalid.'), 'SL_E_TKA_INVALID_THUMBPRINT' : (0xC004F30D, 'The Software Licensing Service reported that the product could not be activated. The thumbprint is invalid.'), 'SL_E_TKA_THUMBPRINT_CERT_NOT_FOUND' : (0xC004F30E, 'The Software Licensing Service reported that the product could not be activated. The thumbprint does not match any certificate.'), 'SL_E_TKA_CRITERIA_MISMATCH' : (0xC004F30F, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the criteria specified in the issuance license.'), 'SL_E_TKA_TPID_MISMATCH' : (0xC004F310, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the trust point identifier (TPID) specified in the issuance license.'), 'SL_E_TKA_SOFT_CERT_DISALLOWED' : (0xC004F311, 'The Software Licensing Service reported that the product could not be activated. A soft token cannot be used for activation.'), 'SL_E_TKA_SOFT_CERT_INVALID' : (0xC004F312, 'The Software Licensing Service reported that the product could not be activated. The certificate cannot be used because its private key is exportable.'), 'SL_E_TKA_CERT_CNG_NOT_AVAILABLE' : (0xC004F313, 'The Software Licensing Service reported that the CNG encryption library could not be loaded. 
The current certificate may not be available on this version of Windows.'), 'E_RM_UNKNOWN_ERROR' : (0xC004FC03, 'A networking problem has occurred while activating your copy of Windows.'), 'SL_I_TIMEBASED_VALIDITY_PERIOD' : (0x4004FC04, 'The Software Licensing Service reported that the application is running within the timebased validity period.'), 'SL_I_PERPETUAL_OOB_GRACE_PERIOD' : (0x4004FC05, 'The Software Licensing Service reported that the application has a perpetual grace period.'), 'SL_I_TIMEBASED_EXTENDED_GRACE_PERIOD' : (0x4004FC06, 'The Software Licensing Service reported that the application is running within the valid extended grace period.'), 'SL_E_VALIDITY_PERIOD_EXPIRED' : (0xC004FC07, 'The Software Licensing Service reported that the validity period expired.'), 'SL_E_IA_THROTTLE_LIMIT_EXCEEDED' : (0xC004FD00, "You've reached the request limit for automatic virtual machine activation. Try again later."), 'SL_E_IA_INVALID_VIRTUALIZATION_PLATFORM' : (0xC004FD01, "Windows isn't running on a supported Microsoft Hyper-V virtualization platform."), 'SL_E_IA_PARENT_PARTITION_NOT_ACTIVATED' : (0xC004FD02, "Windows isn't activated on the host machine. Please contact your system administrator."), 'SL_E_IA_ID_MISMATCH' : (0xC004FD03, "The host machine can't activate the edition of Windows on the virtual machine."), 'SL_E_IA_MACHINE_NOT_BOUND' : (0xC004FD04, "Windows isn't activated."), 'SL_E_TAMPER_RECOVERY_REQUIRES_ACTIVATION' : (0xC004FE00, 'The Software Licensing Service reported that activation is required to recover from tampering of SL Service trusted store.'), }
main.py
import json
import queue
import random
import threading
from multiprocessing.dummy import Pool
from typing import Any, Dict, List, Optional, Set, Tuple

import psycopg2
import requests

# Queue of (startup_id, response_text, success) tuples produced by the scraper
# workers and drained by the single DB-writer daemon thread.
seen: queue.Queue = queue.Queue()
cores: int = 4
pool = Pool(cores)
limit: int = 10000000
# NOTE(review): credentials are hard-coded; consider env vars / a config file.
conn = psycopg2.connect(user='salvadorguzman',
                        password='',
                        host='127.0.0.1',
                        port='5432',
                        database='personal')

# Single source of truth for the 15-column insert (was duplicated verbatim in
# insert_startup and insert_null).
_INSERT_SQL: str = ('INSERT INTO personal.startups.angel_list '
                    '(id, company_name, high_concept, product_desc, slug_url, logo_url, to_s, '
                    'video_url, video_thumbnail, twitter_url, blog_url, company_url, '
                    'facebook_url, linkedin_url, producthunt_url) '
                    'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')


def insert_startup(cursor, data: List[Optional[str]]) -> None:
    """Insert one scraped startup row (15 columns, in _INSERT_SQL order)."""
    cursor.execute(_INSERT_SQL, data)


def insert_null(cursor, num: int) -> None:
    """Record that id *num* yielded no data: insert the id with all-NULL columns."""
    data: List[Optional[int]] = [num] + ([None] * 14)
    cursor.execute(_INSERT_SQL, data)


def none_or_str(s: Optional[str]) -> Optional[str]:
    """Pass None through; otherwise strip NUL bytes (PostgreSQL rejects '\\0' in text)."""
    if s is None:
        return None
    return s.replace('\0', '')


def json_to_array(js: Dict[str, Any]) -> List[Optional[str]]:
    """Flatten a startup JSON object into the 15-column insert order."""
    return [
        js['id'],
        none_or_str(js['company_name']),
        none_or_str(js['high_concept']),
        none_or_str(js['product_desc']),
        none_or_str(js['slug_url']),
        none_or_str(js['logo_url']),
        none_or_str(js['to_s']),
        none_or_str(js['video_url']),
        none_or_str(js['video_thumbnail']),
        none_or_str(js['twitter_url']),
        none_or_str(js['blog_url']),
        none_or_str(js['company_url']),
        none_or_str(js['facebook_url']),
        none_or_str(js['linkedin_url']),
        none_or_str(js['producthunt_url']),
    ]


def print_daemon() -> None:
    """Daemon loop: drain `seen` and persist each result (runs forever).

    Being the only writer, it also serializes all access to `conn`.
    """
    while True:
        msg_payload: Tuple[int, str, bool] = seen.get(block=True)
        print(msg_payload)
        startup_id, msg, good = msg_payload  # 'id' renamed: shadowed the builtin
        cursor = conn.cursor()
        if good:
            js = json.loads(msg)
            insert_startup(cursor, json_to_array(js['startup']))
        else:
            insert_null(cursor, startup_id)
        conn.commit()
        cursor.close()


def get_from_id(startup_id: int) -> str:
    """Fetch the JSON profile page for *startup_id*.

    Returns the response body on HTTP 200, '' on any other status.
    Retries (iteratively — the original recursed, growing the stack) for as
    long as the request keeps timing out.
    """
    url: str = f'https://angel.co/startups/{startup_id}'
    headers: Dict[str, str] = {
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'cookie': '__cfduid=d3dc49bc18740a2b0facc0657340947dc1553140011; _angellist=b5825fc5399971693c4f4c72cf389cde; __cf_bm=51bfcb61da5c15821ad592b81c40b6bbf211a0ef-1553140012-1800-AUXXK8THCpdD/r9r9ghDhDK9p8yhqpTXdxY27mqDQY3W5EtAFXMuYWE44Rzfs4oXi/15JU6825T5PI6fWewiMmc=; _ga=GA1.2.28918781.1553140014; _gid=GA1.2.308356328.1553140014; _gat=1; ajs_user_id=null; ajs_group_id=null; ajs_anonymous_id=%22b5825fc5399971693c4f4c72cf389cde%22; _fbp=fb.1.1553140014995.307173727; amplitude_idundefinedangel.co=eyJvcHRPdXQiOmZhbHNlLCJzZXNzaW9uSWQiOm51bGwsImxhc3RFdmVudFRpbWUiOm51bGwsImV2ZW50SWQiOjAsImlkZW50aWZ5SWQiOjAsInNlcXVlbmNlTnVtYmVyIjowfQ==; amplitude_id_add5896bb4e577b77205df2195a968f6angel.co=eyJkZXZpY2VJZCI6ImY4ODhkYzZhLWMwNmItNGE5OC05NGJiLTYxYjdmYmQ2MDk1NlIiLCJ1c2VySWQiOm51bGwsIm9wdE91dCI6ZmFsc2UsInNlc3Npb25JZCI6MTU1MzE0MDAxNTQ5MSwibGFzdEV2ZW50VGltZSI6MTU1MzE0MDAxNTQ5MSwiZXZlbnRJZCI6MCwiaWRlbnRpZnlJZCI6MCwic2VxdWVuY2VOdW1iZXIiOjB9',
        'dnt': '1',
        'referer': 'https://angel.co/realscout',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
        'x-csrf-token': '0VTeHIWpUOOsq6/acpdy6NlFeoh9WIL9isBrHpHF+EavUvkiIQSIBUA920dXB/U61QGDBdcGRLDA9UgSTf4YXg==',
        'x-requested-with': 'XMLHttpRequest'
    }
    params: Dict[str, Any] = {
        'src': 'startup_profile_lib',
        'associations[]': 'product_screenshots',
        'new_startup_profile': 1
    }
    while True:
        try:
            r: requests.Response = requests.get(url, headers=headers, params=params, timeout=5)
            if r.status_code == 200:
                return r.text
            return ""
        except requests.exceptions.ReadTimeout as e:
            print(startup_id, e)
            # loop and retry


def payload(i: int) -> None:
    """Worker body: fetch one id and enqueue (id, text, success) for the writer."""
    text: str = get_from_id(i)
    seen.put((i, text, len(text) != 0))


def query_incumbent() -> Set[int]:
    """Return the set of startup ids already present in the table."""
    cursor = conn.cursor()
    cursor.execute('SELECT DISTINCT id FROM personal.startups.angel_list')
    ignore: Set[int] = {row[0] for row in cursor.fetchall()}
    cursor.close()
    return ignore


def main() -> None:
    """Scrape every id in [1, limit) not already stored, using a thread pool."""
    threading.Thread(target=print_daemon, daemon=True).start()
    nums_set: Set[int] = set(range(1, limit))
    nums_incumbent: Set[int] = query_incumbent()
    print(len(nums_incumbent), 'retrieved from table')
    nums: List[int] = list(nums_set.difference(nums_incumbent))
    random.shuffle(nums)
    # NOTE(review): main can exit while the daemon queue still holds unwritten
    # rows; a sentinel + join would make shutdown lossless.
    pool.map(payload, nums)


if __name__ == '__main__':
    main()
custom.py
# pylint: disable=too-many-lines # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import io import json import os import os.path import platform import re import ssl import stat import subprocess import sys import tempfile import threading import time import uuid import base64 import webbrowser import zipfile from distutils.version import StrictVersion from math import isnan from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error import requests from knack.log import get_logger from knack.util import CLIError from knack.prompting import prompt_pass, NoTTYException import yaml # pylint: disable=import-error from dateutil.relativedelta import relativedelta # pylint: disable=import-error from dateutil.parser import parse # pylint: disable=import-error from msrestazure.azure_exceptions import CloudError import colorama # pylint: disable=import-error from tabulate import tabulate # pylint: disable=import-error from azure.cli.core.api import get_config_dir from azure.cli.core.azclierror import ManualInterrupt, InvalidArgumentValueError, UnclassifiedUserFault, CLIInternalError, FileOperationError, ClientRequestError, DeploymentError, ValidationError, ArgumentUsageError, MutuallyExclusiveArgumentError, RequiredArgumentMissingError, ResourceNotFoundError from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, 
sdk_no_wait from azure.cli.core.commands import LongRunningOperation from azure.cli.core._profile import Profile from azure.graphrbac.models import (ApplicationCreateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters) from .vendored_sdks.azure_mgmt_preview_aks.v2021_05_01.models import (ContainerServiceLinuxProfile, ManagedClusterWindowsProfile, ContainerServiceNetworkProfile, ManagedClusterServicePrincipalProfile, ContainerServiceSshConfiguration, MaintenanceConfiguration, TimeInWeek, TimeSpan, ContainerServiceSshPublicKey, ManagedCluster, ManagedClusterAADProfile, ManagedClusterAddonProfile, ManagedClusterAgentPoolProfile, AgentPool, AgentPoolUpgradeSettings, ContainerServiceStorageProfileTypes, ManagedClusterIdentity, ManagedClusterAPIServerAccessProfile, ManagedClusterSKU, Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties, ManagedClusterAutoUpgradeProfile, KubeletConfig, LinuxOSConfig, ManagedClusterHTTPProxyConfig, SysctlConfig, ManagedClusterPodIdentityProfile, ManagedClusterPodIdentity, ManagedClusterPodIdentityException, UserAssignedIdentity, RunCommandRequest, ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties) from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import get_msi_client from ._client_factory import cf_resources from ._client_factory import get_resource_by_name from ._client_factory import cf_container_registry_service from ._client_factory import cf_storage from ._client_factory import cf_agent_pools from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type, _parse_comma_separated_list, _trim_fqdn_name_containing_hcp) from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided, update_load_balancer_profile, 
create_load_balancer_profile) from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME from ._consts import CONST_MONITORING_ADDON_NAME from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID from ._consts import CONST_MONITORING_USING_AAD_MSI_AUTH from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME from ._consts import CONST_AZURE_POLICY_ADDON_NAME from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID from ._consts import ADDONS from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE logger = get_logger(__name__) def which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. 
""" for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. """ t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'): try: # added in python 2.7.13 and 3.6 return ssl.SSLContext(ssl.PROTOCOL_TLS) except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal( cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. 
except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0) logger.info('Waiting for AAD role to propagate') for x in range(0, 10): hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0) try: # TODO: break this out into a shared utility library create_role_assignment( cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope) break except CloudError as ex: if ex.message == 'The role assignment already exists.': break logger.info(ex.message) except: # pylint: disable=bare-except pass time.sleep(delay + delay * x) else: return False hook.add(message='AAD role propagation done', value=1.0, total_val=1.0) logger.info('AAD role propagation done') return True def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0) logger.info('Waiting for AAD role to delete') for x in range(0, 10): hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0) try: delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope) break except CLIError as ex: raise ex except CloudError as ex: logger.info(ex) time.sleep(delay + delay * x) else: return False hook.add(message='AAD role deletion done', value=1.0, total_val=1.0) logger.info('AAD role deletion done') return True def 
_get_default_dns_prefix(name, resource_group_name, subscription_id):
    # Builds "<name[:10]>-<rg[:16]>-<sub[:6]>" from sanitized inputs; the
    # name part is forced to start with a letter (prefixed with 'a' if not).
    # Use subscription id to provide uniqueness and prevent DNS name clashes
    name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
    if not name_part[0].isalpha():
        name_part = (str('a') + name_part)[0:10]
    resource_group_part = re.sub(
        '[^A-Za-z0-9-]', '', resource_group_name)[0:16]
    return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])


# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    # Persist the SP credentials for this subscription into the CLI config
    # dir; the file is (re)created with owner-only 0600 permissions.
    obj = {}
    if client_secret:
        obj['client_secret'] = client_secret
    if service_principal:
        obj['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path)
    if not full_config:
        full_config = {}
    full_config[subscription_id] = obj

    with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
                   'w+') as spFile:
        json.dump(full_config, spFile)


def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    # Returns the stored credentials dict for the subscription, or None.
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    if not config:
        return None
    return config.get(subscription_id)


def load_service_principals(config_path):
    # Best-effort read of the credentials file; any parse/read failure is
    # deliberately swallowed and reported as "no stored credentials".
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as f:
            return shell_safe_json_parse(f.read())
    except:  # pylint: disable=bare-except
        return None


def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    # Validates or creates an ARM deployment (incremental mode). With
    # validate=True the template is logged and only validation runs;
    # otherwise begin_create_or_update is dispatched (honoring no_wait).
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(
        template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')

    Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)

    if validate:
        # newer API versions expose validation as a long-running operation
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        else:
            return smc.validate(resource_group_name, deployment_name, deployment)

    return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)


def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    # Creates an AAD application; translates the "insufficient privileges"
    # graph error into an actionable CLIError with a docs link.
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date,
                                                         end_date=end_date)

    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise


def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    # Builds (password_creds, key_creds) lists for application creation;
    # exactly one of password / key_value may be supplied. Dates default to
    # now .. now+1 year; string dates are parsed.
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')

    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = parse(start_date)

    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = parse(end_date)

    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'

    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]

    return (password_creds, key_creds)


def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    # Creates a service principal for an app identified by appId, identifier
    # URI, or object id (tried in that order when resolve_app is True).
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    if resolve_app:
        try:
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))

        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier

    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id,
                                                                                  account_enabled=True))


def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    # Thin wrapper: resolve the assignee only when it is a service principal.
    return _create_role_assignment(cli_ctx,
                                   role, assignee, resource_group_name,
                                   scope, resolve_assignee=is_service_principal)


def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None,
                            resolve_assignee=True):
    # Creates one role assignment at `scope`, resolving the role name to a
    # definition id and (optionally) the assignee to an AAD object id.
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions

    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)

    # XXX: if role is uuid, this function's output cannot be used as role assignment defintion id
    # ref: https://github.com/Azure/azure-cli/issues/2458
    role_id = _resolve_role_id(role, scope, definitions_client)

    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(
        cli_ctx, assignee) if resolve_assignee else assignee

    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(
        role_definition_id=role_id, principal_id=object_id)
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)


def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None,
                            resource_group_name=None, scope=None, include_inherited=False,
                            yes=None):
    # Deletes role assignments either by explicit ids, or by searching on
    # assignee/role/scope; prompts before deleting everything under the
    # subscription when no filter at all was given.
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # NOTE(review): `assignee` is listed twice in this any([...]) — harmless
    # but redundant; presumably one occurrence was meant to be something else.
    if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return

    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)

    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)

    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)


# NOTE(review): this re-defines `_delete_role_assignments` already defined
# earlier in this module; at import time this later definition wins.
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True


def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    # Lists role assignments, then filters client-side by scope, role and
    # assignee object id.
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)

    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())

    if assignments:
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]

        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]

        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]

    return assignments


def _get_role_property(obj, property_name):
    # Role assignments may surface as dicts or as model objects.
    if isinstance(obj, dict):
        return obj[property_name]
    return getattr(obj, property_name)


def _build_role_scope(resource_group_name, scope, subscription_id):
    # Returns the explicit scope if given (rejecting a redundant resource
    # group), else the resource-group scope, else the subscription scope.
    subscription_scope = '/subscriptions/' + subscription_id
    if scope:
        if resource_group_name:
            err = 'Resource group "{}" is redundant because scope is supplied'
            raise CLIError(err.format(resource_group_name))
    elif resource_group_name:
        scope = subscription_scope + '/resourceGroups/' + resource_group_name
    else:
        scope = subscription_scope
    return scope


def _resolve_role_id(role, scope, definitions_client):
    # Accepts either a role definition GUID (used verbatim) or a role name
    # (looked up at `scope`; must match exactly one definition).
    role_id = None
    try:
        uuid.UUID(role)
        role_id = role
    except ValueError:
        pass
    if not role_id:  # retrieve role id
        role_defs = list(definitions_client.list(
            scope, "roleName eq '{}'".format(role)))
        if not role_defs:
            raise CLIError("Role '{}' doesn't exist.".format(role))
        if len(role_defs) > 1:
            ids = [r.id for r in role_defs]
            err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
            raise CLIError(err.format(role, ids))
        role_id = role_defs[0].id
    return role_id


def _resolve_object_id(cli_ctx, assignee):
    # Resolves a UPN, service principal name, or raw object id to an AAD
    # object id, trying each interpretation in turn.
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if assignee.find('@') >= 0:  # looks like a user principal name
        result = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])

    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))

    return result[0].object_id


def _get_object_stubs(graph_client, assignees):
    params = GetObjectsParameters(include_directory_object_references=True,
                                  object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(params))


def subnet_role_assignment_exists(cli_ctx, scope):
    # True when a Network Contributor assignment already exists at `scope`
    # (matched by the well-known role definition GUID suffix).
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"

    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments

    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
            return True
    return False


# Parses user-assigned identity resource ids into (subscription, resource
# group, identity name); matching is case-insensitive.
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)


def _get_user_assigned_identity(cli_ctx, resource_id):
    # Fetches the user-assigned identity resource named by `resource_id`.
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        msi_client = get_msi_client(cli_ctx, subscription_id)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            # give a cleaner message for the common "not found" case
            if 'was not found' in ex.message:
                raise CLIError("Identity {} not found.".format(resource_id))
            raise CLIError(ex.message)
        return identity

    raise CLIError(
        "Cannot parse identity name from provided resource id {}.".format(resource_id))


def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    return _get_user_assigned_identity(cli_ctx, resource_id).client_id


def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    return _get_user_assigned_identity(cli_ctx, resource_id).principal_id


def _update_dict(dict1, dict2):
    # Returns a new dict: dict1 overlaid with dict2 (dict2 wins on clashes).
    cp = dict1.copy()
    cp.update(dict2)
    return cp


def aks_browse(cmd,     # pylint: disable=too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               disable_browser=False,
               listen_address='127.0.0.1',
               listen_port='8001'):
    # Opens the cluster's Kubernetes resources view: the Azure Portal page
    # for k8s >= 1.19.0 or when the kube-dashboard addon is disabled,
    # otherwise a local `kubectl proxy` tunnel to the dashboard addon.
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))

    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)

        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return

    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=False, path=browse_path)

    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path,
             "--namespace", "kube-system", "--output", "name",
             "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")

    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path,
             "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))

    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'

    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post(
            'http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning(
            'To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)

    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # older kubectl doesn't know --address; fall back to port only
            if err.output.find(b'unknown flag: --address'):
                if listen_address != '127.0.0.1':
                    logger.warning(
                        '"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning(
                        'The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig",
                                 browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')


def _trim_nodepoolname(nodepool_name):
    # Node pool names must be 12 chars or fewer (the RP appends to them).
    if not nodepool_name:
        return "nodepool1"
    return nodepool_name[:12]


def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    # Grants 'Monitoring Metrics Publisher' on the cluster to either the
    # cluster's service principal or the omsagent addon MSI.
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True elif ( (hasattr(result, 'addon_profiles')) and (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and (hasattr( result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id')) ): logger.info('omsagent MSI exists, using it') service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id is_service_principal = False if service_principal_msi_id is not None: if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher', service_principal_msi_id, is_service_principal, scope=cluster_resource_id): logger.warning('Could not create a role assignment for Monitoring addon. ' 'Are you an Owner on this subscription?') else: logger.warning('Could not find service principal or user assigned MSI for role' 'assignment') def _add_ingress_appgw_addon_role_assignment(result, cmd): service_principal_msi_id = None # Check if service principal exists, if it does, assign permissions to service principal # Else, provide permissions to MSI if ( hasattr(result, 'service_principal_profile') and hasattr(result.service_principal_profile, 'client_id') and result.service_principal_profile.client_id != 'msi' ): service_principal_msi_id = result.service_principal_profile.client_id is_service_principal = True elif ( (hasattr(result, 'addon_profiles')) and (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and (hasattr( result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id')) ): service_principal_msi_id = result.addon_profiles[ CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id is_service_principal = False if service_principal_msi_id is not None: config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config from msrestazure.tools import parse_resource_id, resource_id if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config: 
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] parsed_appgw_id = parse_resource_id(appgw_id) appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"], resource_group=parsed_appgw_id["resource_group"]) if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_msi_id, is_service_principal, scope=appgw_group_id): logger.warning('Could not create a role assignment for application gateway: %s ' 'specified in %s addon. ' 'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME) if CONST_INGRESS_APPGW_SUBNET_ID in config: subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID] if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor', service_principal_msi_id, is_service_principal, scope=subnet_id): logger.warning('Could not create a role assignment for subnet: %s ' 'specified in %s addon. ' 'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME) if CONST_INGRESS_APPGW_SUBNET_CIDR in config: if result.agent_pool_profiles[0].vnet_subnet_id is not None: parsed_subnet_vnet_id = parse_resource_id( result.agent_pool_profiles[0].vnet_subnet_id) vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"], resource_group=parsed_subnet_vnet_id["resource_group"], namespace="Microsoft.Network", type="virtualNetworks", name=parsed_subnet_vnet_id["name"]) if not _add_role_assignment(cmd.cli_ctx, 'Contributor', service_principal_msi_id, is_service_principal, scope=vnet_id): logger.warning('Could not create a role assignment for virtual network: %s ' 'specified in %s addon. 
' 'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME) def aks_maintenanceconfiguration_list( cmd, client, resource_group_name, cluster_name ): return client.list_by_managed_cluster(resource_group_name, cluster_name) def aks_maintenanceconfiguration_show( cmd, client, resource_group_name, cluster_name, config_name ): logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ', resource_group_name, cluster_name, config_name) return client.get(resource_group_name, cluster_name, config_name) def aks_maintenanceconfiguration_delete( cmd, client, resource_group_name, cluster_name, config_name ): logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ', resource_group_name, cluster_name, config_name) return client.delete(resource_group_name, cluster_name, config_name) def aks_maintenanceconfiguration_add( cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour ): configs = client.list_by_managed_cluster(resource_group_name, cluster_name) for config in configs: if config.name == config_name: raise CLIError("Maintenance configuration '{}' already exists, please try a different name, " "use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name)) return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour) def aks_maintenanceconfiguration_update( cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour ): configs = client.list_by_managed_cluster(resource_group_name, cluster_name) found = False for config in configs: if config.name == config_name: found = True break if not found: raise CLIError("Maintenance configuration '{}' doesn't exist." 
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name)) return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour) def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches client, resource_group_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", windows_admin_username=None, windows_admin_password=None, enable_ahub=False, kubernetes_version='', node_vm_size="Standard_DS2_v2", node_osdisk_type=None, node_osdisk_size=0, node_osdisk_diskencryptionset_id=None, node_count=3, nodepool_name="nodepool1", nodepool_tags=None, nodepool_labels=None, service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, enable_vmss=None, vm_set_type=None, skip_subnet_role_assignment=False, os_sku=None, enable_fips_image=False, enable_cluster_autoscaler=False, cluster_autoscaler_profile=None, network_plugin=None, network_policy=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, load_balancer_sku=None, load_balancer_managed_outbound_ip_count=None, load_balancer_outbound_ips=None, load_balancer_outbound_ip_prefixes=None, load_balancer_outbound_ports=None, load_balancer_idle_timeout=None, outbound_type=None, enable_addons=None, workspace_resource_id=None, enable_msi_auth_for_monitoring=False, min_count=None, max_count=None, vnet_subnet_id=None, pod_subnet_id=None, ppg=None, max_pods=0, aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, node_zones=None, enable_node_public_ip=False, node_public_ip_prefix_id=None, generate_ssh_keys=False, # pylint: disable=unused-argument enable_pod_security_policy=False, node_resource_group=None, uptime_sla=False, attach_acr=None, enable_private_cluster=False, private_dns_zone=None, enable_managed_identity=True, 
fqdn_subdomain=None, enable_public_fqdn=False, api_server_authorized_ip_ranges=None, aks_custom_headers=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_aad=False, enable_azure_rbac=False, aad_admin_group_object_ids=None, aci_subnet_name=None, enable_sgxquotehelper=False, kubelet_config=None, linux_os_config=None, http_proxy_config=None, assign_identity=None, auto_upgrade_channel=None, enable_pod_identity=False, enable_pod_identity_with_kubenet=False, enable_encryption_at_host=False, enable_ultra_ssd=False, enable_secret_rotation=False, disable_local_accounts=False, no_wait=False, assign_kubelet_identity=None, yes=False): if not no_ssh_key: try: if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value): raise ValueError() except (TypeError, ValueError): shortened_key = truncate_text(ssh_key_value) raise CLIError( 'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key)) subscription_id = get_subscription_id(cmd.cli_ctx) if dns_name_prefix and fqdn_subdomain: raise CLIError( '--dns-name-prefix and --fqdn-subdomain cannot be used at same time') if not dns_name_prefix and not fqdn_subdomain: dns_name_prefix = _get_default_dns_prefix( name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # Flag to be removed, kept for back-compatibility only. Remove the below section # when we deprecate the enable-vmss flag if enable_vmss: if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower(): raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'. 
format(vm_set_type)) vm_set_type = "VirtualMachineScaleSets" vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version) load_balancer_sku = set_load_balancer_sku( load_balancer_sku, kubernetes_version) if api_server_authorized_ip_ranges and load_balancer_sku == "basic": raise CLIError( '--api-server-authorized-ip-ranges can only be used with standard load balancer') agent_pool_profile = ManagedClusterAgentPoolProfile( # Must be 12 chars or less before ACS RP adds to it name=_trim_nodepoolname(nodepool_name), tags=nodepool_tags, node_labels=nodepool_labels, count=int(node_count), vm_size=node_vm_size, os_type="Linux", os_sku=os_sku, mode="System", vnet_subnet_id=vnet_subnet_id, pod_subnet_id=pod_subnet_id, proximity_placement_group_id=ppg, availability_zones=node_zones, enable_node_public_ip=enable_node_public_ip, enable_fips=enable_fips_image, node_public_ip_prefix_id=node_public_ip_prefix_id, enable_encryption_at_host=enable_encryption_at_host, enable_ultra_ssd=enable_ultra_ssd, max_pods=int(max_pods) if max_pods else None, type=vm_set_type ) if node_osdisk_size: agent_pool_profile.os_disk_size_gb = int(node_osdisk_size) if node_osdisk_type: agent_pool_profile.os_disk_type = node_osdisk_type _check_cluster_autoscaler_flag( enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile) if kubelet_config: agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config) if linux_os_config: agent_pool_profile.linux_os_config = _get_linux_os_config( linux_os_config) linux_profile = None # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified. 
if not no_ssh_key: ssh_config = ContainerServiceSshConfiguration( public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)]) linux_profile = ContainerServiceLinuxProfile( admin_username=admin_username, ssh=ssh_config) windows_profile = None if windows_admin_username: if windows_admin_password is None: try: windows_admin_password = prompt_pass( msg='windows-admin-password: ', confirm=True) except NoTTYException: raise CLIError( 'Please specify both username and password in non-interactive mode.') windows_license_type = None if enable_ahub: windows_license_type = 'Windows_Server' windows_profile = ManagedClusterWindowsProfile( admin_username=windows_admin_username, admin_password=windows_admin_password, license_type=windows_license_type) service_principal_profile = None principal_obj = None # If customer explicitly provides a service principal, disable managed identity. if service_principal and client_secret: enable_managed_identity = False if not enable_managed_identity: principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal=service_principal, client_secret=client_secret, subscription_id=subscription_id, dns_name_prefix=dns_name_prefix, fqdn_subdomain=fqdn_subdomain, location=location, name=name) service_principal_profile = ManagedClusterServicePrincipalProfile( client_id=principal_obj.get("service_principal"), secret=principal_obj.get("client_secret")) if attach_acr: if enable_managed_identity: if no_wait: raise CLIError('When --attach-acr and --enable-managed-identity are both specified, ' '--no-wait is not allowed, please wait until the whole operation succeeds.') else: _ensure_aks_acr(cmd.cli_ctx, client_id=service_principal_profile.client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) need_post_creation_vnet_permission_granting = False if (vnet_subnet_id and not skip_subnet_role_assignment and not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)): # if service_principal_profile is None, then this cluster 
is an MSI cluster, # and the service principal does not exist. Two cases: # 1. For system assigned identity, we just tell user to grant the # permission after the cluster is created to keep consistent with portal experience. # 2. For user assigned identity, we can grant needed permission to # user provided user assigned identity before creating managed cluster. if service_principal_profile is None and not assign_identity: msg = ('It is highly recommended to use USER assigned identity ' '(option --assign-identity) when you want to bring your own' 'subnet, which will have no latency for the role assignment to ' 'take effect. When using SYSTEM assigned identity, ' 'azure-cli will grant Network Contributor role to the ' 'system assigned identity after the cluster is created, and ' 'the role assignment will take some time to take effect, see ' 'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, ' 'proceed to create cluster with system assigned identity?') from knack.prompting import prompt_y_n if not yes and not prompt_y_n(msg, default="n"): return None need_post_creation_vnet_permission_granting = True else: scope = vnet_subnet_id identity_client_id = "" if assign_identity: identity_client_id = _get_user_assigned_identity_client_id( cmd.cli_ctx, assign_identity) else: identity_client_id = service_principal_profile.client_id if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor', identity_client_id, scope=scope): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') load_balancer_profile = create_load_balancer_profile( load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout) outbound_type = _set_outbound_type( outbound_type, network_plugin, load_balancer_sku, load_balancer_profile) network_profile = None if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]): if not network_plugin: raise CLIError('Please explicitly specify the network plugin type') if pod_cidr and network_plugin == "azure": raise CLIError( 'Please use kubenet as the network plugin type when pod_cidr is specified') network_profile = ContainerServiceNetworkProfile( network_plugin=network_plugin, pod_cidr=pod_cidr, service_cidr=service_cidr, dns_service_ip=dns_service_ip, docker_bridge_cidr=docker_bridge_address, network_policy=network_policy, load_balancer_sku=load_balancer_sku.lower(), load_balancer_profile=load_balancer_profile, outbound_type=outbound_type ) else: if load_balancer_sku.lower() == "standard" or load_balancer_profile: network_profile = ContainerServiceNetworkProfile( network_plugin="kubenet", load_balancer_sku=load_balancer_sku.lower(), load_balancer_profile=load_balancer_profile, outbound_type=outbound_type, ) if load_balancer_sku.lower() == "basic": network_profile = ContainerServiceNetworkProfile( load_balancer_sku=load_balancer_sku.lower(), ) addon_profiles = _handle_addons_args( cmd=cmd, addons_str=enable_addons, subscription_id=subscription_id, resource_group_name=resource_group_name, addon_profiles={}, workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace, 
enable_sgxquotehelper=enable_sgxquotehelper, aci_subnet_name=aci_subnet_name, vnet_subnet_id=vnet_subnet_id, enable_secret_rotation=enable_secret_rotation, ) monitoring = False if CONST_MONITORING_ADDON_NAME in addon_profiles: monitoring = True if enable_msi_auth_for_monitoring and not enable_managed_identity: raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.") _ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, location, aad_route=enable_msi_auth_for_monitoring, create_dcr=True, create_dcra=False) # addon is in the list and is enabled ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \ addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled os_type = 'Linux' enable_virtual_node = False if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles: enable_virtual_node = True aad_profile = None if enable_aad: if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]): raise CLIError('"--enable-aad" cannot be used together with ' '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"') if disable_rbac and enable_azure_rbac: raise CLIError( '"--enable-azure-rbac" can not be used together with "--disable-rbac"') aad_profile = ManagedClusterAADProfile( managed=True, enable_azure_rbac=enable_azure_rbac, # ids -> i_ds due to track 2 naming issue admin_group_object_i_ds=_parse_comma_separated_list( aad_admin_group_object_ids), tenant_id=aad_tenant_id ) else: if aad_admin_group_object_ids is not None: raise CLIError( '"--admin-aad-object-id" can only be used together with "--enable-aad"') if enable_azure_rbac is True: raise CLIError( '"--enable-azure-rbac" can only be used together with "--enable-aad"') if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]): aad_profile = ManagedClusterAADProfile( client_app_id=aad_client_app_id, 
server_app_id=aad_server_app_id, server_app_secret=aad_server_app_secret, tenant_id=aad_tenant_id ) # Check that both --disable-rbac and --enable-rbac weren't provided if all([disable_rbac, enable_rbac]): raise CLIError( 'specify either "--disable-rbac" or "--enable-rbac", not both.') api_server_access_profile = None if api_server_authorized_ip_ranges: api_server_access_profile = _populate_api_server_access_profile( api_server_authorized_ip_ranges) identity = None if not enable_managed_identity and assign_identity: raise CLIError( '--assign-identity can only be specified when --enable-managed-identity is specified') if enable_managed_identity and not assign_identity: identity = ManagedClusterIdentity( type="SystemAssigned" ) elif enable_managed_identity and assign_identity: user_assigned_identity = { assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties() } identity = ManagedClusterIdentity( type="UserAssigned", user_assigned_identities=user_assigned_identity ) identity_profile = None if assign_kubelet_identity: if not assign_identity: raise CLIError('--assign-kubelet-identity can only be specified when --assign-identity is specified') kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity) identity_profile = { 'kubeletidentity': ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties( resource_id=assign_kubelet_identity, client_id=kubelet_identity.client_id, object_id=kubelet_identity.principal_id ) } cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity) # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity _ensure_cluster_identity_permission_on_kubelet_identity(cmd.cli_ctx, cluster_identity_object_id, assign_kubelet_identity) pod_identity_profile = None if enable_pod_identity: if not enable_managed_identity: raise CLIError( '--enable-pod-identity can 
only be specified when --enable-managed-identity is specified') pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True) _ensure_pod_identity_kubenet_consent( network_profile, pod_identity_profile, enable_pod_identity_with_kubenet) enable_rbac = True if disable_rbac: enable_rbac = False auto_upgrade_profile = None if auto_upgrade_channel is not None: auto_upgrade_profile = ManagedClusterAutoUpgradeProfile( upgrade_channel=auto_upgrade_channel) mc = ManagedCluster( location=location, tags=tags, dns_prefix=dns_name_prefix, kubernetes_version=kubernetes_version, enable_rbac=enable_rbac, agent_pool_profiles=[agent_pool_profile], linux_profile=linux_profile, windows_profile=windows_profile, service_principal_profile=service_principal_profile, network_profile=network_profile, addon_profiles=addon_profiles, aad_profile=aad_profile, auto_scaler_profile=cluster_autoscaler_profile, enable_pod_security_policy=bool(enable_pod_security_policy), identity=identity, disk_encryption_set_id=node_osdisk_diskencryptionset_id, api_server_access_profile=api_server_access_profile, auto_upgrade_profile=auto_upgrade_profile, pod_identity_profile=pod_identity_profile, identity_profile=identity_profile, disable_local_accounts=bool(disable_local_accounts)) if node_resource_group: mc.node_resource_group = node_resource_group use_custom_private_dns_zone = False if not enable_private_cluster and enable_public_fqdn: raise ArgumentUsageError("--enable-public-fqdn should only be used with --enable-private-cluster") if enable_private_cluster: if load_balancer_sku.lower() != "standard": raise ArgumentUsageError( "Please use standard load balancer for private cluster") mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile( enable_private_cluster=True ) if enable_public_fqdn: mc.api_server_access_profile.enable_private_cluster_public_fqdn = True if private_dns_zone: if not enable_private_cluster: raise ArgumentUsageError( "Invalid private dns zone for public cluster. 
It should always be empty for public cluster") mc.api_server_access_profile.private_dns_zone = private_dns_zone from msrestazure.tools import is_valid_resource_id if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM and private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_NONE: if is_valid_resource_id(private_dns_zone): use_custom_private_dns_zone = True else: raise ResourceNotFoundError(private_dns_zone + " is not a valid Azure resource ID.") if fqdn_subdomain: if not use_custom_private_dns_zone: raise ArgumentUsageError( "--fqdn-subdomain should only be used for private cluster with custom private dns zone") mc.fqdn_subdomain = fqdn_subdomain if http_proxy_config: mc.http_proxy_config = _get_http_proxy_config(http_proxy_config) if uptime_sla: mc.sku = ManagedClusterSKU( name="Basic", tier="Paid" ) headers = get_aks_custom_headers(aks_custom_headers) # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: if monitoring and enable_msi_auth_for_monitoring: # Creating a DCR Association (for the monitoring addon) requires waiting for cluster creation to finish no_wait = False created_cluster = _put_managed_cluster_ensuring_permission( cmd, client, subscription_id, resource_group_name, name, mc, monitoring, ingress_appgw_addon_enabled, enable_virtual_node, need_post_creation_vnet_permission_granting, vnet_subnet_id, enable_managed_identity, attach_acr, headers, no_wait) if monitoring and enable_msi_auth_for_monitoring: # Create the DCR Association here _ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, location, aad_route=enable_msi_auth_for_monitoring, create_dcr=False, create_dcra=True) return created_cluster except CloudError as ex: retry_exception = ex if 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def aks_update(cmd, # pylint: 
disable=too-many-statements,too-many-branches,too-many-locals
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None, no_wait=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False,
               attach_acr=None,
               detach_acr=None,
               uptime_sla=False,
               no_uptime_sla=False,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               aks_custom_headers=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               disable_pod_identity=False,
               enable_secret_rotation=False,
               disable_secret_rotation=False,
               disable_local_accounts=False,
               enable_local_accounts=False,
               enable_public_fqdn=False,
               disable_public_fqdn=False,
               yes=False,
               tags=None,
               windows_admin_password=None,
               enable_azure_rbac=False,
               disable_azure_rbac=False):
    """Update properties of an existing managed cluster.

    Fetches the cluster via ``client.get``, mutates the returned model in place
    according to the flags supplied, and submits it back through
    ``_put_managed_cluster_ensuring_permission``. Raises CLIError when no
    update flag is provided or when mutually exclusive flags are combined.
    Returns None (without updating) when the user declines an interactive
    confirmation prompt or when the requested autoscaler state already holds.
    """
    # Booleans summarising which groups of flags were supplied; used below both
    # for the "no-op invocation" guard and to gate each update section.
    update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
    update_acr = attach_acr is not None or detach_acr is not None
    update_pod_security = enable_pod_security_policy or disable_pod_security_policy
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (
        aad_tenant_id is None and aad_admin_group_object_ids is None and not enable_azure_rbac and not disable_azure_rbac)
    # Reject an invocation that requests no change at all.
    # NOTE(review): 'not update_lb_profile' appears twice in this chain — kept as-is.
    # pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile \
       and api_server_authorized_ip_ranges is None and \
       not update_pod_security and \
       not update_lb_profile and \
       not uptime_sla and \
       not no_uptime_sla and \
       not enable_aad and \
       not update_aad_profile and \
       not enable_ahub and \
       not disable_ahub and \
       not auto_upgrade_channel and \
       not enable_managed_identity and \
       not assign_identity and \
       not enable_pod_identity and \
       not disable_pod_identity and \
       not enable_secret_rotation and \
       not disable_secret_rotation and \
       not tags and \
       not windows_admin_password and \
       not enable_local_accounts and \
       not disable_local_accounts and \
       not enable_public_fqdn and \
       not disable_public_fqdn:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--enable-pod-security-policy" or '
                       '"--disable-pod-security-policy" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--attach-acr" or '
                       '"--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--no-uptime-sla" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub" or '
                       '"--enable-managed-identity" or '
                       '"--enable-pod-identity" or '
                       '"--disable-pod-identity" or '
                       '"--auto-upgrade-channel" or '
                       '"--enable-secret-rotation" or '
                       '"--disable-secret-rotation" or '
                       '"--tags" or '
                       '"--windows-admin-password" or '
                       '"--enable-azure-rbac" or '
                       '"--disable-azure-rbac" or '
                       '"--enable-local-accounts" or '
                       '"--disable-local-accounts" or '
                       '"--enable-public-fqdn" or '
                       '"--disable-public-fqdn"')

    instance = client.get(resource_group_name, name)

    # Cluster-level autoscaler flags only make sense for single-pool clusters;
    # multi-pool clusters must use the per-pool nodepool commands.
    if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # Already enabled — warn and bail out without calling the service.
            logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already disabled for this managed cluster.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None

    # if intention is to clear profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        # Keys arrive CLI-style with dashes; the SDK model uses underscores.
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile

    if enable_pod_security_policy and disable_pod_security_policy:
        raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
                       'at the same time.')
    if enable_pod_security_policy:
        instance.enable_pod_security_policy = True
    if disable_pod_security_policy:
        instance.enable_pod_security_policy = False

    if disable_local_accounts and enable_local_accounts:
        raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
                       'at the same time.')
    if disable_local_accounts:
        instance.disable_local_accounts = True
    if enable_local_accounts:
        instance.disable_local_accounts = False

    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)

    if attach_acr and detach_acr:
        raise CLIError(
            'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')

    if uptime_sla and no_uptime_sla:
        raise CLIError(
            'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')

    # Uptime SLA is expressed via the SKU tier (Paid = SLA, Free = no SLA).
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )

    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Resolve the identity used for ACR role assignments: the kubelet identity
    # on MSI clusters, otherwise the cluster's service principal.
    client_id = ""
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')

    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)

    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)

    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(
                api_server_authorized_ip_ranges, instance)

    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError(
                'Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )

    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            # ids -> i_ds due to track 2 naming issue
            instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
                aad_admin_group_object_ids)
        if enable_azure_rbac and disable_azure_rbac:
            raise CLIError(
                'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
        if enable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = True
        if disable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = False

    if enable_ahub and disable_ahub:
        raise CLIError(
            'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')

    # Azure Hybrid User Benefit toggles the Windows license type.
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'

    if enable_public_fqdn and disable_public_fqdn:
        raise MutuallyExclusiveArgumentError(
            'Cannot specify "--enable-public-fqdn" and "--disable-public-fqdn" at the same time')
    is_private_cluster = instance.api_server_access_profile is not None and instance.api_server_access_profile.enable_private_cluster
    if enable_public_fqdn:
        if not is_private_cluster:
            raise ArgumentUsageError('--enable-public-fqdn can only be used for private cluster')
        instance.api_server_access_profile.enable_private_cluster_public_fqdn = True
    if disable_public_fqdn:
        if not is_private_cluster:
            raise ArgumentUsageError('--disable-public-fqdn can only be used for private cluster')
        if instance.api_server_access_profile.private_dns_zone.lower() == CONST_PRIVATE_DNS_ZONE_NONE:
            raise ArgumentUsageError('--disable-public-fqdn cannot be applied for none mode private dns zone cluster')
        instance.api_server_access_profile.enable_private_cluster_public_fqdn = False

    if instance.auto_upgrade_profile is None:
        instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()

    if auto_upgrade_channel is not None:
        instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel

    if not enable_managed_identity and assign_identity:
        raise CLIError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')

    # Determine current vs requested identity type; "spn" means service principal.
    current_identity_type = "spn"
    if instance.identity is not None:
        current_identity_type = instance.identity.type.casefold()

    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"

    if current_identity_type != goal_identity_type:
        # Identity migration is destructive for kubelet auth, so confirm first
        # unless --yes was passed.
        from knack.prompting import prompt_y_n
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
                   'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
                   'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
                   'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            user_assigned_identity = {
                assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )

    if enable_pod_identity:
        if not _is_pod_identity_addon_enabled(instance):
            # we only rebuild the pod identity profile if it's disabled before
            _update_addon_pod_identity(
                instance, enable=True,
                allow_kubenet_consent=enable_pod_identity_with_kubenet,
            )

    if disable_pod_identity:
        _update_addon_pod_identity(instance, enable=False)

    # Snapshot which addons are enabled; these flags feed the final PUT helper.
    azure_keyvault_secrets_provider_addon_profile = None
    monitoring_addon_enabled = False
    ingress_appgw_addon_enabled = False
    virtual_node_addon_enabled = False
    if instance.addon_profiles is not None:
        azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
        azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
        monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
        ingress_appgw_addon_enabled = 
CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
        virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
            instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled

    # Secret rotation is a config knob on the keyvault-secrets-provider addon,
    # so it can only be toggled when that addon is already enabled.
    if enable_secret_rotation:
        if not azure_keyvault_secrets_provider_enabled:
            raise CLIError(
                '--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
        azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"

    if disable_secret_rotation:
        if not azure_keyvault_secrets_provider_enabled:
            raise CLIError(
                '--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
        azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"

    if tags:
        instance.tags = tags

    if windows_admin_password:
        instance.windows_profile.admin_password = windows_admin_password

    headers = get_aks_custom_headers(aks_custom_headers)

    # Submit the mutated model; the helper also re-grants any permissions
    # (ACR, vnet) that the update requires.
    return _put_managed_cluster_ensuring_permission(cmd,
                                                    client,
                                                    subscription_id,
                                                    resource_group_name,
                                                    name,
                                                    instance,
                                                    monitoring_addon_enabled,
                                                    ingress_appgw_addon_enabled,
                                                    virtual_node_addon_enabled,
                                                    False,
                                                    instance.agent_pool_profiles[0].vnet_subnet_id,
                                                    _is_msi_cluster(instance),
                                                    attach_acr,
                                                    headers,
                                                    no_wait)


def aks_show(cmd, client, resource_group_name, name):   # pylint: disable=unused-argument
    """Fetch a managed cluster and return it with often-null fields removed."""
    mc = client.get(resource_group_name, name)
    return _remove_nulls([mc])[0]


def _remove_nulls(managed_clusters):
    """
    Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
    doesn't contain distracting null fields.

    This works around a quirk of the SDK for python behavior. These fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization. 
""" attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) if managed_cluster.agent_pool_profiles is not None: for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def aks_get_credentials(cmd, # pylint: disable=unused-argument client, resource_group_name, name, admin=False, user='clusterUser', path=os.path.join(os.path.expanduser( '~'), '.kube', 'config'), overwrite_existing=False, context_name=None, public_fqdn=False): credentialResults = None serverType = None if public_fqdn: serverType = 'public' if admin: credentialResults = client.list_cluster_admin_credentials( resource_group_name, name, serverType) else: if user.lower() == 'clusteruser': credentialResults = client.list_cluster_user_credentials( resource_group_name, name, serverType) elif user.lower() == 'clustermonitoringuser': credentialResults = client.list_cluster_monitoring_user_credentials( resource_group_name, name, serverType) else: raise CLIError("The user is invalid.") if not credentialResults: raise CLIError("No Kubernetes credentials found.") try: kubeconfig = credentialResults.kubeconfigs[0].value.decode( encoding='UTF-8') _print_or_merge_credentials( path, kubeconfig, overwrite_existing, context_name) except (IndexError, ValueError): raise CLIError("Fail to find kubeconfig file.") # pylint: disable=line-too-long def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals client, resource_group_name, name, storage_account=None, sas_token=None, container_logs=None, kube_objects=None, node_logs=None): colorama.init() mc = 
client.get(resource_group_name, name) if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') storage_account_id = None if storage_account is None: print("No storage account specified. Try getting storage account from diagnostic settings") storage_account_id = get_storage_account_from_diag_settings( cmd.cli_ctx, resource_group_name, name) if storage_account_id is None: raise CLIError( "A storage account must be specified, since there isn't one in the diagnostic settings.") from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id if storage_account_id is None: if not is_valid_resource_id(storage_account): storage_account_id = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Storage', type='storageAccounts', name=storage_account ) else: storage_account_id = storage_account if is_valid_resource_id(storage_account_id): try: parsed_storage_account = parse_resource_id(storage_account_id) except CloudError as ex: raise CLIError(ex.message) else: raise CLIError("Invalid storage account id %s" % storage_account_id) storage_account_name = parsed_storage_account['name'] readonly_sas_token = None if sas_token is None: storage_client = cf_storage( cmd.cli_ctx, parsed_storage_account['subscription']) storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'], storage_account_name) kwargs = { 'account_name': storage_account_name, 'account_key': storage_account_keys.keys[0].value } cloud_storage_client = cloud_storage_account_service_factory( cmd.cli_ctx, kwargs) sas_token = cloud_storage_client.generate_shared_access_signature( 'b', 'sco', 'rwdlacup', datetime.datetime.utcnow() + datetime.timedelta(days=1)) readonly_sas_token = cloud_storage_client.generate_shared_access_signature( 'b', 'sco', 'rl', datetime.datetime.utcnow() + datetime.timedelta(days=1)) readonly_sas_token = readonly_sas_token.strip('?') from 
knack.prompting import prompt_y_n print() print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and ' f'save them to the storage account ' f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as ' f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.') print() print('If you share access to that storage account to Azure support, you consent to the terms outlined' f' in {format_hyperlink("http://aka.ms/DiagConsent")}.') print() if not prompt_y_n('Do you confirm?', default="n"): return print() print("Getting credentials for cluster %s " % name) _, temp_kubeconfig_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path) print() print("Starts collecting diag info for cluster %s " % name) sas_token = sas_token.strip('?') deployment_yaml = urlopen( "https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode() deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>", (base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii')) deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>", (base64.b64encode(bytes("?" 
+ sas_token, 'ascii'))).decode('ascii')) yaml_lines = deployment_yaml.splitlines() for index, line in enumerate(yaml_lines): if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None: yaml_lines[index] = line + ' ' + container_logs if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None: yaml_lines[index] = line + ' ' + kube_objects if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None: yaml_lines[index] = line + ' ' + node_logs deployment_yaml = '\n'.join(yaml_lines) fd, temp_yaml_path = tempfile.mkstemp() temp_yaml_file = os.fdopen(fd, 'w+t') try: temp_yaml_file.write(deployment_yaml) temp_yaml_file.flush() temp_yaml_file.close() try: print() print("Cleaning up aks-periscope resources if existing") subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "serviceaccount,configmap,daemonset,secret", "--all", "-n", "aks-periscope", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRoleBinding", "aks-periscope-role-binding", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRoleBinding", "aks-periscope-role-binding-view", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "ClusterRole", "aks-periscope-role", "--ignore-not-found"], stderr=subprocess.STDOUT) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "--all", "apd", "-n", "aks-periscope", "--ignore-not-found"], stderr=subprocess.DEVNULL) subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete", "CustomResourceDefinition", "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"], stderr=subprocess.STDOUT) print() print("Deploying aks-periscope") subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f", temp_yaml_path, "-n", 
"aks-periscope"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError as err: raise CLIError(err.output) finally: os.remove(temp_yaml_path) print() fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn normalized_fqdn = fqdn.replace('.', '-') token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \ f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}" print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}') print() print(f'You can download Azure Stroage Explorer here ' f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}' f' to check the logs by adding the storage account using the following URL:') print(f'{format_hyperlink(log_storage_account_url)}') print() if not prompt_y_n('Do you want to see analysis results now?', default="n"): print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' " f"anytime to check the analysis results.") else: display_diagnostics_report(temp_kubeconfig_path) def aks_kanalyze(cmd, client, resource_group_name, name): colorama.init() client.get(resource_group_name, name) _, temp_kubeconfig_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path) display_diagnostics_report(temp_kubeconfig_path) def aks_scale(cmd, # pylint: disable=unused-argument client, resource_group_name, name, node_count, nodepool_name="", no_wait=False): instance = client.get(resource_group_name, name) if len(instance.agent_pool_profiles) > 1 and nodepool_name == "": raise CLIError('There are more than one node pool in the cluster. 
' 'Please specify nodepool name or use az aks nodepool command to scale node pool') for agent_profile in instance.agent_pool_profiles: if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1): if agent_profile.enable_auto_scaling: raise CLIError( "Cannot scale cluster autoscaler enabled node pool.") agent_profile.count = int(node_count) # pylint: disable=no-member # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance) raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name)) def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements client, resource_group_name, name, kubernetes_version='', control_plane_only=False, no_wait=False, node_image_only=False, aks_custom_headers=None, yes=False): from knack.prompting import prompt_y_n msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?' if not yes and not prompt_y_n(msg, default="n"): return None instance = client.get(resource_group_name, name) vmas_cluster = False for agent_profile in instance.agent_pool_profiles: if agent_profile.type.lower() == "availabilityset": vmas_cluster = True break if kubernetes_version != '' and node_image_only: raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. ' 'If you only want to upgrade the node version please use the "--node-image-only" option only.') if node_image_only: msg = "This node image upgrade operation will run across every node pool in the cluster" \ "and might take a while, do you wish to continue?" if not yes and not prompt_y_n(msg, default="n"): return None # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all # nodepools of a cluster. 
The SDK only support upgrade single nodepool at a time. for agent_pool_profile in instance.agent_pool_profiles: if vmas_cluster: raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation ' 'can only be applied on VirtualMachineScaleSets cluster.') agent_pool_client = cf_agent_pools(cmd.cli_ctx) _upgrade_single_nodepool_image_version( True, agent_pool_client, resource_group_name, name, agent_pool_profile.name) mc = client.get(resource_group_name, name) return _remove_nulls([mc])[0] if instance.kubernetes_version == kubernetes_version: if instance.provisioning_state == "Succeeded": logger.warning("The cluster is already on version %s and is not in a failed state. No operations " "will occur when upgrading to the same version if the cluster is not in a failed state.", instance.kubernetes_version) elif instance.provisioning_state == "Failed": logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to " "attempt resolution of failed cluster state.", instance.kubernetes_version) upgrade_all = False instance.kubernetes_version = kubernetes_version # for legacy clusters, we always upgrade node pools with CCP. if instance.max_agent_pools < 8 or vmas_cluster: if control_plane_only: msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be " "upgraded to {} as well. Continue?").format(instance.kubernetes_version) if not yes and not prompt_y_n(msg, default="n"): return None upgrade_all = True else: if not control_plane_only: msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane " "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version) if not yes and not prompt_y_n(msg, default="n"): return None upgrade_all = True else: msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. " "Node pool will not change. 
Continue?").format(instance.kubernetes_version) if not yes and not prompt_y_n(msg, default="n"): return None if upgrade_all: for agent_profile in instance.agent_pool_profiles: agent_profile.orchestrator_version = kubernetes_version # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None headers = get_aks_custom_headers(aks_custom_headers) return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers) def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None): colorama.init() mc = client.get(resource_group_name, name) if not command_string: raise CLIError('Command cannot be empty.') request_payload = RunCommandRequest(command=command_string) request_payload.context = _get_command_context(command_files) if mc.aad_profile is not None and mc.aad_profile.managed: request_payload.cluster_token = _get_dataplane_aad_token( cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630") commandResultFuture = client.begin_run_command( resource_group_name, name, request_payload, polling_interval=5, retry_total=0) return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300)) def aks_command_result(cmd, client, resource_group_name, name, command_id=""): if not command_id: raise CLIError('CommandID cannot be empty.') commandResult = client.get_command_result( resource_group_name, name, command_id) return _print_command_result(cmd.cli_ctx, commandResult) def _print_command_result(cli_ctx, commandResult): # cli_ctx.data['safe_params'] contains list of parameter name user typed in, without value. # cli core also use this calculate ParameterSetName header for all http request from cli. 
if cli_ctx.data['safe_params'] is None or "-o" in cli_ctx.data['safe_params'] or "--output" in cli_ctx.data['safe_params']: # user specified output format, honor their choice, return object to render pipeline return commandResult else: # user didn't specified any format, we can customize the print for best experience if commandResult.provisioning_state == "Succeeded": # succeed, print exitcode, and logs print(f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, finished at {commandResult.finished_at}, with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}") print(commandResult.logs) return if commandResult.provisioning_state == "Failed": # failed, print reason in error print( f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}") return # *-ing state print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}") return None def _get_command_context(command_files): if not command_files: return "" filesToAttach = {} # . means to attach current folder, cannot combine more files. (at least for now) if len(command_files) == 1 and command_files[0] == ".": # current folder cwd = os.getcwd() for filefolder, _, files in os.walk(cwd): for file in files: # retain folder structure rel = os.path.relpath(filefolder, cwd) filesToAttach[os.path.join( filefolder, file)] = os.path.join(rel, file) else: for file in command_files: if file == ".": raise CLIError( ". 
is used to attach current folder, not expecting other attachements.") if os.path.isfile(file): # for individual attached file, flatten them to same folder filesToAttach[file] = os.path.basename(file) else: raise CLIError(f"{file} is not valid file, or not accessable.") if len(filesToAttach) < 1: logger.debug("no files to attach!") return "" zipStream = io.BytesIO() zipFile = zipfile.ZipFile(zipStream, "w") for _, (osfile, zipEntry) in enumerate(filesToAttach.items()): zipFile.write(osfile, zipEntry) # zipFile.printdir() // use this to debug zipFile.close() return str(base64.encodebytes(zipStream.getbuffer()), "utf-8") def _get_dataplane_aad_token(cli_ctx, serverAppId): # this function is mostly copied from keyvault cli import adal try: return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken') except adal.AdalError as err: # pylint: disable=no-member if (hasattr(err, 'error_response') and ('error_description' in err.error_response) and ('AADSTS70008:' in err.error_response['error_description'])): raise CLIError( "Credentials have expired due to inactivity. 
Please run 'az login'") raise CLIError(err) def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name): return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name) def _handle_addons_args(cmd, # pylint: disable=too-many-statements addons_str, subscription_id, resource_group_name, addon_profiles=None, workspace_resource_id=None, enable_msi_auth_for_monitoring=False, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False, aci_subnet_name=None, vnet_subnet_id=None, enable_secret_rotation=False): if not addon_profiles: addon_profiles = {} addons = addons_str.split(',') if addons_str else [] if 'http_application_routing' in addons: addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile( enabled=True) addons.remove('http_application_routing') if 'kube-dashboard' in addons: addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile( enabled=True) addons.remove('kube-dashboard') # TODO: can we help the user find a workspace resource ID? 
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)

        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
                                                                                 config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
                                                                                         CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
        addons.remove('monitoring')
    # a workspace is only valid together with the monitoring addon
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'gitops' in addons:
        addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('gitops')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        # NOTE(review): appgw_subnet_prefix and appgw_subnet_cidr write the same
        # config key, so cidr wins when both are given — presumably prefix is the
        # deprecated alias; confirm against the parameter definitions.
        if appgw_subnet_prefix is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')
    if 'azure-keyvault-secrets-provider' in addons:
        # secret rotation defaults to off unless explicitly enabled
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
        if enable_secret_rotation:
            addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
        addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
        addons.remove('azure-keyvault-secrets-provider')
    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')

    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles


def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Find or create the default Log Analytics workspace for the monitoring addon.

    Maps the cluster resource group's region to a supported workspace region per
    cloud (public / China / US Gov), then reuses the canonical
    DefaultResourceGroup-<code> / DefaultWorkspace-<sub>-<code> resources,
    creating them when absent. Returns the workspace resource ID.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }

    # mapping for azure china cloud
    # log analytics only support China East2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }

    # mapping for azure us governmner cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name

    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(
            rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
            workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(
            rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
            workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
            rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
            workspace_region, "USGV")
    else:
        # NOTE(review): on an unsupported cloud this only logs; execution then
        # continues and the following lines reference workspace_region /
        # workspace_region_code which were never assigned, so this path will
        # raise NameError rather than a clean CLIError — worth confirming/fixing.
        logger.error(
            "AKS Monitoring addon not supported in cloud : %s", cloud_name)

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
        subscription_id, workspace_region_code)

    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    from azure.cli.core.profiles import ResourceType
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        from azure.core.exceptions import HttpResponseError
        try:
            resource = resources.get_by_id(
                default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except HttpResponseError as ex:
            # 404 just means the workspace doesn't exist yet — fall through to create it
            if ex.status_code != 404:
                raise ex
    else:
        ResourceGroup = cmd.get_models('ResourceGroup', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        resource_group = ResourceGroup(location=workspace_region)
        resource_groups.create_or_update(default_workspace_resource_group, resource_group)

    GenericResource = cmd.get_models('GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    generic_resource = GenericResource(location=workspace_region, properties={'sku': {'name': 'standalone'}})

    async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                          generic_resource)

    # poll in 15s slices until the workspace creation completes
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id


def _sanitize_loganalytics_ws_resource_id(workspace_resource_id):
    """Normalize a workspace resource ID: trim whitespace, ensure a single
    leading '/' and no trailing '/'."""
    workspace_resource_id = workspace_resource_id.strip()

    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id

    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    return workspace_resource_id


def _ensure_container_insights_for_monitoring(cmd,
                                              addon,
                                              cluster_subscription,
                                              cluster_resource_group_name,
                                              cluster_name,
                                              cluster_region,
                                              remove_monitoring=False,
                                              aad_route=False,
                                              create_dcr=False,
                                              create_dcra=False):
    """
    Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA
    (Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace.

    Set aad_route == True to set up the DCR data route. Otherwise the solution route will be used. Create_dcr and
    create_dcra have no effect if aad_route == False.

    Set remove_monitoring to True and create_dcra to True to remove the DCRA from a cluster. The association makes
    it very hard to delete either the DCR or cluster. (It is not obvious how to even navigate to the association from
    the portal, and it prevents the cluster and DCR from being deleted individually).
    """
    if not addon.enabled:
        return None

    # workaround for this addon key which has been seen lowercased in the wild
    for key in list(addon.config):
        if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
                key)

    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id

    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
        workspace_name = workspace_resource_id.split('/')[8]
    except IndexError:
        raise CLIError(
            'Could not locate resource group in workspace-resource-id URL.')

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    if not remove_monitoring:
        resources = cf_resources(cmd.cli_ctx, subscription_id)
        from azure.core.exceptions import HttpResponseError
        try:
            resource = resources.get_by_id(
                workspace_resource_id, '2015-11-01-preview')
            location = resource.location
        except HttpResponseError as ex:
            raise ex

    if aad_route:
        cluster_resource_id = f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}"
        dataCollectionRuleName = f"DCR-{workspace_name}"
        dcr_resource_id = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
        from azure.cli.core.util import send_raw_request
        from azure.cli.core.profiles import ResourceType

        if create_dcr:
            # first get the association between region display names and region IDs (because for some reason
            # the "which RPs are available in which regions" check returns region display names)
            region_names_to_id = {}
            # retry the request up to two times
            for _ in range(3):
                try:
                    location_list_url = f"https://management.azure.com/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)

                    # this is required to fool the static analyzer. The else statement will only run if an exception
                    # is thrown, but flake8 will complain that e is undefined if we don't also define it here.
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                # This will run if the above for loop was not broken out of. This means all three requests failed
                raise error
            json_response = json.loads(r.text)
            for region_data in json_response["value"]:
                region_names_to_id[region_data["displayName"]] = region_data["name"]

            # check if region supports DCRs and DCR-A
            for _ in range(3):
                try:
                    feature_check_url = f"https://management.azure.com/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
                    r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error
            json_response = json.loads(r.text)
            for resource in json_response["resourceTypes"]:
                # map is lazy, so doing this for every region isn't slow
                region_ids = map(lambda x: region_names_to_id[x], resource["locations"])
                if resource["resourceType"].lower() == "datacollectionrules" and location not in region_ids:
                    raise ClientRequestError(f'Data Collection Rules are not supported for LA workspace region {location}')
                # NOTE(review): this branch checks cluster_region but the message
                # interpolates {location} — likely should be {cluster_region};
                # left unchanged in this comment-only edit.
                elif resource["resourceType"].lower() == "datacollectionruleassociations" and cluster_region not in region_ids:
                    raise ClientRequestError(f'Data Collection Rule Associations are not supported for cluster region {location}')

            # create the DCR
            dcr_creation_body = json.dumps({"location": location,
                                            "properties": {
                                                "dataSources": {
                                                    "extensions": [
                                                        {
                                                            "name": "ContainerInsightsExtension",
                                                            "streams": [
                                                                "Microsoft-Perf",
                                                                "Microsoft-ContainerInventory",
                                                                "Microsoft-ContainerLog",
                                                                "Microsoft-ContainerLogV2",
                                                                "Microsoft-ContainerNodeInventory",
                                                                "Microsoft-KubeEvents",
                                                                "Microsoft-KubeHealth",
                                                                "Microsoft-KubeMonAgentEvents",
                                                                "Microsoft-KubeNodeInventory",
                                                                "Microsoft-KubePodInventory",
                                                                "Microsoft-KubePVInventory",
                                                                "Microsoft-KubeServices",
                                                                "Microsoft-InsightsMetrics"
                                                            ],
                                                            "extensionName": "ContainerInsights"
                                                        }
                                                    ]
                                                },
                                                "dataFlows": [
                                                    {
                                                        "streams": [
                                                            "Microsoft-Perf",
                                                            "Microsoft-ContainerInventory",
                                                            "Microsoft-ContainerLog",
                                                            "Microsoft-ContainerLogV2",
                                                            "Microsoft-ContainerNodeInventory",
                                                            "Microsoft-KubeEvents",
                                                            "Microsoft-KubeHealth",
                                                            "Microsoft-KubeMonAgentEvents",
                                                            "Microsoft-KubeNodeInventory",
                                                            "Microsoft-KubePodInventory",
                                                            "Microsoft-KubePVInventory",
                                                            "Microsoft-KubeServices",
                                                            "Microsoft-InsightsMetrics"
                                                        ],
                                                        "destinations": [
                                                            "la-workspace"
                                                        ]
                                                    }
                                                ],
                                                "destinations": {
                                                    "logAnalytics": [
                                                        {
                                                            "workspaceResourceId": workspace_resource_id,
                                                            "name": "la-workspace"
                                                        }
                                                    ]
                                                }
                                            }})
            dcr_url = f"https://management.azure.com/{dcr_resource_id}?api-version=2019-11-01-preview"
            for _ in range(3):
                try:
                    send_raw_request(cmd.cli_ctx, "PUT", dcr_url, body=dcr_creation_body)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error

        if create_dcra:
            # only create or delete the association between the DCR and cluster
            association_body = json.dumps({"location": cluster_region,
                                           "properties": {
                                               "dataCollectionRuleId": dcr_resource_id,
                                               "description": "routes monitoring data to a Log Analytics workspace"
                                           }})
            association_url = f"https://management.azure.com/{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/send-to-{workspace_name}?api-version=2019-11-01-preview"
            for _ in range(3):
                try:
                    # remove_monitoring flips the PUT into a DELETE of the association
                    send_raw_request(cmd.cli_ctx, "PUT" if not remove_monitoring else "DELETE", association_url,
                                     body=association_body)
                    error = None
                    break
                except CLIError as e:
                    error = e
            else:
                raise error

    else:
        # legacy auth with LA workspace solution
        unix_time_in_millis = int(
            (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

        solution_deployment_name = 'ContainerInsights-{}'.format(
            unix_time_in_millis)

        # pylint: disable=line-too-long
        template = {
            "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
            "contentVersion": "1.0.0.0",
            "parameters": {
                "workspaceResourceId": {
                    "type": "string",
                    "metadata": {
                        "description": "Azure Monitor Log Analytics Resource ID"
                    }
                },
                "workspaceRegion": {
                    "type": "string",
                    "metadata": {
                        "description": "Azure Monitor Log Analytics workspace region"
                    }
                },
                "solutionDeploymentName": {
                    "type": "string",
                    "metadata": {
                        "description": "Name of the solution deployment"
                    }
                }
            },
            "resources": [
                {
                    "type": "Microsoft.Resources/deployments",
                    "name": "[parameters('solutionDeploymentName')]",
                    "apiVersion": "2017-05-10",
                    "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                    "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                    "properties": {
                        "mode": "Incremental",
                        "template": {
                            "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                            "contentVersion": "1.0.0.0",
                            "parameters": {},
                            "variables": {},
                            "resources": [
                                {
                                    "apiVersion": "2015-11-01-preview",
                                    "type": "Microsoft.OperationsManagement/solutions",
                                    "location": "[parameters('workspaceRegion')]",
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "properties": {
                                        "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                    },
                                    "plan": {
                                        "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                        "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                        "promotionCode": "",
                                        "publisher": "Microsoft"
                                    }
                                }
                            ]
                        },
                        "parameters": {}
                    }
                }
            ]
        }

        params = {
            "workspaceResourceId": {
                "value": workspace_resource_id
            },
            "workspaceRegion": {
                "value": location
            },
            "solutionDeploymentName": {
                "value": solution_deployment_name
            }
        }

        deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
        # publish the Container Insights solution to the Log Analytics workspace
        return _invoke_deployment(cmd, resource_group, deployment_name, template,
                                  params, validate=False, no_wait=False, subscription_id=subscription_id)


def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Load a cached AKS service principal, create a new one, or validate a supplied one.

    Precedence: an explicit --service-principal (must come with --client-secret),
    otherwise the SP cached on disk in aksServicePrincipal.json, otherwise a
    freshly created SP. The result is stored back to disk and reloaded.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal( subscription_id, file_name=file_name_aks) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') if dns_name_prefix: url = 'http://{}.{}.{}.cloudapp.azure.com'.format( salt, dns_name_prefix, location) else: url = 'http://{}.{}.{}.cloudapp.azure.com'.format( salt, fqdn_subdomain, location) service_principal = _build_service_principal( rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError( '--client-secret is required if --service-principal is specified') store_acs_service_principal( subscription_id, client_secret, service_principal, file_name=file_name_aks) return load_acs_service_principal(subscription_id, file_name=file_name_aks) def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. 
    # The get itself is the existence check: raises if the group doesn't exist.
    rg = groups.get(resource_group_name)
    return rg.location


def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate the cluster-autoscaler CLI flags and apply them to the pool profile.

    With --enable-cluster-autoscaler, requires min_count <= node_count <= max_count
    and mutates agent_pool_profile in place (min_count/max_count/enable_auto_scaling).
    Without it, rejects stray --min-count/--max-count. Raises CLIError on any
    invalid combination; returns None.
    """
    if enable_cluster_autoscaler:
        if min_count is None or max_count is None:
            raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count')
        if int(node_count) < int(min_count) or int(node_count) > int(max_count):
            raise CLIError(
                'node-count is not in the range of min-count and max-count')
        agent_pool_profile.min_count = int(min_count)
        agent_pool_profile.max_count = int(max_count)
        agent_pool_profile.enable_auto_scaling = True
    else:
        if min_count is not None or max_count is not None:
            raise CLIError(
                'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')


def _create_client_secret():
    """Generate a random secret for a new service principal (20 hex chars + '$')."""
    # Add a special character to satisfy AAD SP secret requirements
    special_char = '$'
    client_secret = binascii.b2a_hex(
        os.urandom(10)).decode('utf-8') + special_char
    return client_secret


def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,  # pylint: disable=unused-argument
                    detach=False):
    """Grant (or, with detach=True, revoke) the identity's 'acrpull' role on an ACR.

    acr_name_or_id may be either a full ACR resource ID or a bare registry name;
    raises CLIError when the registry cannot be resolved.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(
                parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(
            cli_ctx, client_id, registry.id, detach)
        return
    # Check if the ACR exists by name across all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(
            cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        # Translate "not found" into an actionable hint; re-wrap everything else.
        if 'was not found' in ex.message:
            raise CLIError(
                "ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
    return


def _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry_id, detach=False):
    """Create (or delete, when detach=True) the 'acrpull' role assignment on registry_id.

    Raises CLIError when the role assignment operation reports failure.
    """
    if detach:
        if not _delete_role_assignments(cli_ctx, 'acrpull', client_id, scope=registry_id):
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    if not _add_role_assignment(cli_ctx, 'acrpull', client_id, scope=registry_id):
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
    return


def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Give the virtual-node addon identity Contributor rights on the cluster VNet.

    Prefers the cluster service principal when one exists; otherwise uses the
    addon's user-assigned MSI object id. Logs warnings (does not raise) on failure.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal,
                                    scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        # NOTE(review): the two fragments below concatenate without a space and log
        # as "...MSI for roleassignment" -- consider adding a trailing space.
        logger.warning('Could not find service principal or user assigned MSI for role'
                       'assignment')


def aks_agentpool_show(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Return the named agent pool of the given managed cluster."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    return instance


def aks_agentpool_list(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """List all agent pools of the given managed cluster."""
    return client.list(resource_group_name, cluster_name)


def aks_agentpool_add(cmd,      # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      os_sku=None,
                      enable_fips_image=False,
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      no_wait=False):
    """Add a new agent pool to an existing managed cluster.

    Builds an AgentPool model from the CLI flags and starts the create operation.
    Raises CLIError when a pool with the same name already exists.
    """
    # Reject duplicate pool names up front with a friendlier error than the RP's.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    if node_taints is not None:
        # NOTE(review): str.strip()/list.append() never raise ValueError, so this
        # except clause is dead code -- taints are effectively not validated here.
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    # Pick a default VM size per OS when the caller did not specify one.
    if node_vm_size is None:
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        os_sku=os_sku,
        enable_fips=enable_fips_image,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        scale_set_priority=priority,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode
    )

    # Spot pools additionally carry an eviction policy and a max price
    # (NaN from the CLI means "pay up to the on-demand price", encoded as -1).
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)

    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)


def aks_agentpool_scale(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale the named agent pool to node_count nodes.

    Raises CLIError when the pool has the autoscaler enabled (manual scaling
    would fight it) or when the requested count equals the current count.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    new_node_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if new_node_count == instance.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    instance.count = new_node_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance)


def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None,
                          aks_custom_headers=None):
    """Upgrade an agent pool's Kubernetes version, or only its node image.

    --node-image-only and an explicit Kubernetes version are mutually exclusive
    because a version upgrade already re-images the nodes.
    """
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version.'
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance, headers=headers)


def aks_agentpool_get_upgrade_profile(cmd,   # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the upgrade profile (available versions) for the named agent pool."""
    return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)


def aks_agentpool_update(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update autoscaler settings, tags, mode or max-surge of an agent pool.

    Exactly one of the three autoscaler switches may be set; alternatively
    tags/mode/max-surge may be updated on their own.
    """
    # Booleans sum to the number of autoscaler switches supplied.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler

    if (update_autoscaler != 1 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Idempotent no-op: warn instead of failing when already enabled.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    instance.tags = tags
    if mode is not None:
        instance.mode = mode

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, instance)


def aks_agentpool_delete(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete the named agent pool; raises CLIError when it does not exist."""
    agentpool_exists = False
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name.lower() == nodepool_name.lower():
            agentpool_exists = True
            break

    if not agentpool_exists:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    return sdk_no_wait(no_wait, client.begin_delete,
                       resource_group_name, cluster_name, nodepool_name)


def aks_disable_addons(cmd, client, resource_group_name, name,
                       addons, no_wait=False):
    """Disable the given comma-separated addons on the managed cluster.

    For the monitoring addon with AAD/MSI auth, the DCR association is removed
    first so the Data Collection Rule itself can later be deleted.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)

    try:
        if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
                CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # remove the DCR association because otherwise the DCR can't be deleted
            _ensure_container_insights_for_monitoring(
                cmd,
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                subscription_id,
                resource_group_name,
                name,
                instance.location,
                remove_monitoring=True,
                aad_route=True,
                create_dcr=False,
                create_dcra=True
            )
    except TypeError:
        # Tolerate a malformed/absent addon config (e.g. config is None).
        pass

    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, name, instance)


def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_prefix=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      enable_secret_rotation=False,
                      no_wait=False,
                      enable_msi_auth_for_monitoring=False):
    """Enable the given comma-separated addons, wiring up monitoring/DCR and
    post-creation role assignments where the addon requires them.
    """
    instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = True if instance.service_principal_profile.client_id == "msi" else False

    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_prefix=appgw_subnet_prefix,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              enable_secret_rotation=enable_secret_rotation,
                              no_wait=no_wait)

    # Monitoring needs extra plumbing beyond the addon profile itself.
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            if not msi_auth:
                raise ArgumentUsageError(
                    "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                _ensure_container_insights_for_monitoring(cmd,
                                                          instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                          subscription_id,
                                                          resource_group_name,
                                                          name,
                                                          instance.location,
                                                          aad_route=True,
                                                          create_dcr=True,
                                                          create_dcra=True)
        else:
            # monitoring addon will use legacy path
            _ensure_container_insights_for_monitoring(cmd,
                                                      instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                      subscription_id,
                                                      resource_group_name,
                                                      name,
                                                      instance.location,
                                                      aad_route=False)

    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True

    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService',
                type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
else: result = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance) return result def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name) def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements instance, subscription_id, resource_group_name, name, addons, enable, workspace_resource_id=None, enable_msi_auth_for_monitoring=False, subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, no_wait=False): # pylint: disable=unused-argument # parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} os_type = 'Linux' # for each addons argument for addon_arg in addon_args: if addon_arg not in ADDONS: raise CLIError("Invalid addon name: {}.".format(addon_arg)) addon = ADDONS[addon_arg] if addon == CONST_VIRTUAL_NODE_ADDON_NAME: # only linux is supported for now, in the future this will be a user flag addon += os_type # honor addon names defined in Azure CLI for key in list(addon_profiles): if key.lower() == addon.lower() and key != addon: addon_profiles[addon] = addon_profiles.pop(key) if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get( addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == CONST_MONITORING_ADDON_NAME: logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before enabling it again.') if not workspace_resource_id: 
                    # No workspace supplied: fall back to the subscription's
                    # default Log Analytics workspace (creating it if needed).
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                workspace_resource_id = _sanitize_loganalytics_ws_resource_id(
                    workspace_resource_id)

                addon_profile.config = {
                    logAnalyticsConstName: workspace_resource_id}
                addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    # NOTE(review): this message is not an f-string, so
                    # "{resource_group_name}" is printed literally to the user.
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # Start from a fresh profile; only set keys the user supplied.
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_prefix is not None:
                    # --appgw-subnet-prefix is the deprecated spelling of
                    # --appgw-subnet-cidr; both write the same config key.
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
                                   'To change open-service-mesh configuration, run '
                                   f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
                                   'To change confcom configuration, run '
                                   f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
                                   'To change azure-keyvault-secrets-provider configuration, run '
                                   f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
                if enable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
                addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
            addon_profiles[addon] = addon_profile
        else:
            # Disabling: the addon must already be present (kube-dashboard is
            # tolerated as absent and recorded as disabled).
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance


def aks_get_versions(cmd,
                     client, location):     # pylint: disable=unused-argument
    """List the orchestrator (Kubernetes) versions available in the location."""
    return client.list_orchestrators(location, resource_type='managedClusters')


def aks_get_os_options(cmd, client, location):     # pylint: disable=unused-argument
    """List the OS options available for managed clusters in the location."""
    return client.get_os_options(location, resource_type='managedClusters')


def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path,
    or print it to stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return

    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Another process may have created it in the meantime.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the file owner-read/write only (0600) like kubectl does.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass

    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)


def _handle_merge(existing, addition, key, replace):
    """Merge addition[key] (a kubeconfig list: clusters/users/contexts) into
    existing[key], prompting on name clashes unless replace is set.
    """
    if not addition[key]:
        return
    if existing[key] is None:
        existing[key] = addition[key]
        return

    for i in addition[key]:
        for j in existing[key]:
            if i['name'] == j['name']:
                if replace or i == j:
                    existing[key].remove(j)
                else:
                    from knack.prompting import prompt_y_n
                    msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
                    overwrite = False
                    try:
                        overwrite = prompt_y_n(msg.format(i['name']))
                    except NoTTYException:
                        # No terminal to ask on: treat as "do not overwrite".
                        pass
                    if overwrite:
                        existing[key].remove(j)
                    else:
                        msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) if context_name is not None: addition['contexts'][0]['name'] = context_name addition['contexts'][0]['context']['cluster'] = context_name addition['clusters'][0]['name'] = context_name addition['current-context'] = context_name # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError( 'failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format( stat.S_IMODE(os.lstat(existing_file).st_mode)) if not existing_file_perms.endswith('600'): logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.', existing_file, existing_file_perms) with open(existing_file, 'w+') as stream: yaml.safe_dump(existing, stream, 
                       default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    print(msg)


def cloud_storage_account_service_factory(cli_ctx, kwargs):
    """Build a CloudStorageAccount client from account_name/account_key/sas_token
    kwargs (consumed from the dict; connection_string is dropped).
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    t_cloud_storage_account = get_sdk(
        cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
    account_name = kwargs.pop('account_name', None)
    account_key = kwargs.pop('account_key', None)
    sas_token = kwargs.pop('sas_token', None)
    kwargs.pop('connection_string', None)
    return t_cloud_storage_account(account_name, account_key, sas_token)


def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
    """Return the storage account id from the cluster's first diagnostic
    setting, or None when no diagnostic settings are configured.
    """
    from azure.mgmt.monitor import MonitorManagementClient
    diag_settings_client = get_mgmt_service_client(
        cli_ctx, MonitorManagementClient).diagnostic_settings
    subscription_id = get_subscription_id(cli_ctx)
    aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
        '/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
    diag_settings = diag_settings_client.list(aks_resource_id)
    if diag_settings.value:
        # Only the first diagnostic setting's storage account is used.
        return diag_settings.value[0].storage_account_id
    print("No diag settings specified")
    return None


def display_diagnostics_report(temp_kubeconfig_path):   # pylint: disable=too-many-statements
    """Poll aks-periscope diagnostic results via kubectl and print per-node
    network configuration and connectivity tables.

    Requires kubectl on PATH; raises CLIError otherwise or when kubectl fails.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    nodes = subprocess.check_output(
        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
         "get", "node", "--no-headers"],
        universal_newlines=True)
    logger.debug(nodes)
    node_lines = nodes.splitlines()
    # Maps node name -> "its diagnostic result has been collected".
    ready_nodes = {}
    for node_line in node_lines:
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning(
                "Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False

    logger.debug('There are %s ready nodes in the cluster',
                 str(len(ready_nodes)))

    if not ready_nodes:
        logger.warning(
            'No nodes are ready in the current cluster. Diagnostics info might not be available.')

    network_config_array = []
    network_status_array = []
    apds_created = False

    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # Phase 1: wait until one apd resource exists per ready node.
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
                 "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)

            print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
                                                                            len(ready_nodes),
                                                                            '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            # Phase 2: collect per-node results until every ready node reports.
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n", "aks-periscope",
                         "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s',
                                 node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n", "aks-periscope",
                         "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s',
                                 node_name, network_status)

                    if not network_config or not network_status:
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break

                    network_config_array += json.loads(
                        '[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(
                        network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)

    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")

    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")


def format_diag_status(diag_status):
    """Colorize each entry's Status in place (red on error, green otherwise)."""
    for diag in diag_status:
        if diag["Status"]:
            if "Error:" in diag["Status"]:
                diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
            else:
                diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
    return diag_status


def format_bright(msg):
    """Return msg wrapped in ANSI bold/bright codes."""
    return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'


def format_hyperlink(the_link):
    """Return the_link wrapped in ANSI bold blue codes."""
    return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'


def get_aks_custom_headers(aks_custom_headers=None):
    """Parse a comma-separated "Key=Value,Key=Value" string into a headers dict.

    NOTE(review): split('=') rejects values that themselves contain '=' (e.g.
    base64 padding); consider split('=', 1) if such values must be supported.
    """
    headers = {}
    if aks_custom_headers is not None:
        if aks_custom_headers != "":
            for pair in aks_custom_headers.split(','):
                parts = pair.split('=')
                if len(parts) != 2:
                    raise CLIError('custom headers format is incorrect')
                headers[parts[0]] = parts[1]
    return headers


def _put_managed_cluster_ensuring_permission(
        cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
managed_cluster, monitoring_addon_enabled, ingress_appgw_addon_enabled, virtual_node_addon_enabled, need_grant_vnet_permission_to_cluster_identity, vnet_subnet_id, enable_managed_identity, attach_acr, headers, no_wait ): # some addons require post cluster creation role assigment need_post_creation_role_assignment = (monitoring_addon_enabled or ingress_appgw_addon_enabled or (enable_managed_identity and attach_acr) or virtual_node_addon_enabled or need_grant_vnet_permission_to_cluster_identity) if need_post_creation_role_assignment: # adding a wait here since we rely on the result for role assignment cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update( resource_group_name=resource_group_name, resource_name=name, parameters=managed_cluster, headers=headers)) cloud_name = cmd.cli_ctx.cloud.name # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud': from msrestazure.tools import resource_id cluster_resource_id = resource_id( subscription=subscription_id, resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=name ) _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd) if ingress_appgw_addon_enabled: _add_ingress_appgw_addon_role_assignment(cluster, cmd) if virtual_node_addon_enabled: _add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id) if need_grant_vnet_permission_to_cluster_identity: if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor', cluster.identity.principal_id, scope=vnet_subnet_id, resolve_assignee=False): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') if enable_managed_identity and attach_acr: # Attach ACR to cluster enabled managed identity if cluster.identity_profile is None or \ cluster.identity_profile["kubeletidentity"] is None: logger.warning('Your cluster is successfully created, but we failed to attach ' 'acr to it, you can manually grant permission to the identity ' 'named <ClUSTER_NAME>-agentpool in MC_ resource group to give ' 'it permission to pull from ACR.') else: kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id _ensure_aks_acr(cmd.cli_ctx, client_id=kubelet_identity_client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) else: cluster = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=managed_cluster, headers=headers) return cluster def _is_msi_cluster(managed_cluster): return (managed_cluster and managed_cluster.identity and (managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned")) def _get_kubelet_config(file_path): if not os.path.isfile(file_path): raise CLIError("{} is not valid file, or not accessable.".format(file_path)) kubelet_config = get_file_json(file_path) if not isinstance(kubelet_config, dict): raise CLIError( "Error reading kubelet configuration at {}. 
Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path)) config_object = KubeletConfig() config_object.cpu_manager_policy = kubelet_config.get( "cpuManagerPolicy", None) config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None) config_object.cpu_cfs_quota_period = kubelet_config.get( "cpuCfsQuotaPeriod", None) config_object.image_gc_high_threshold = kubelet_config.get( "imageGcHighThreshold", None) config_object.image_gc_low_threshold = kubelet_config.get( "imageGcLowThreshold", None) config_object.topology_manager_policy = kubelet_config.get( "topologyManagerPolicy", None) config_object.allowed_unsafe_sysctls = kubelet_config.get( "allowedUnsafeSysctls", None) config_object.fail_swap_on = kubelet_config.get("failSwapOn", None) config_object.container_log_max_files = kubelet_config.get( "containerLogMaxFiles", None) config_object.container_log_max_size_mb = kubelet_config.get( "containerLogMaxSizeMb", None) return config_object def _get_linux_os_config(file_path): if not os.path.isfile(file_path): raise CLIError("{} is not valid file, or not accessable.".format(file_path)) os_config = get_file_json(file_path) if not isinstance(os_config, dict): raise CLIError( "Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path)) config_object = LinuxOSConfig() config_object.transparent_huge_page_enabled = os_config.get( "transparentHugePageEnabled", None) config_object.transparent_huge_page_defrag = os_config.get( "transparentHugePageDefrag", None) config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None) # sysctl settings sysctls = os_config.get("sysctls", None) if not isinstance(sysctls, dict): raise CLIError( "Error reading Sysctl settings at {}. 
Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path)) config_object.sysctls = SysctlConfig() config_object.sysctls.net_core_somaxconn = sysctls.get( "netCoreSomaxconn", None) config_object.sysctls.net_core_netdev_max_backlog = sysctls.get( "netCoreNetdevMaxBacklog", None) config_object.sysctls.net_core_rmem_max = sysctls.get( "netCoreRmemMax", None) config_object.sysctls.net_core_wmem_max = sysctls.get( "netCoreWmemMax", None) config_object.sysctls.net_core_optmem_max = sysctls.get( "netCoreOptmemMax", None) config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get( "netIpv4TcpMaxSynBacklog", None) config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get( "netIpv4TcpMaxTwBuckets", None) config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get( "netIpv4TcpFinTimeout", None) config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get( "netIpv4TcpKeepaliveTime", None) config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get( "netIpv4TcpKeepaliveProbes", None) config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get( "netIpv4TcpkeepaliveIntvl", None) config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get( "netIpv4TcpRmem", None) config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get( "netIpv4TcpWmem", None) config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get( "netIpv4TcpTwReuse", None) config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get( "netIpv4IpLocalPortRange", None) config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get( "netIpv4NeighDefaultGcThresh1", None) config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get( "netIpv4NeighDefaultGcThresh2", None) config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get( "netIpv4NeighDefaultGcThresh3", None) config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get( "netNetfilterNfConntrackMax", None) config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get( 
"netNetfilterNfConntrackBuckets", None) config_object.sysctls.fs_inotify_max_user_watches = sysctls.get( "fsInotifyMaxUserWatches", None) config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None) config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None) config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None) config_object.sysctls.kernel_threads_max = sysctls.get( "kernelThreadsMax", None) config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None) config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None) config_object.sysctls.vm_vfs_cache_pressure = sysctls.get( "vmVfsCachePressure", None) return config_object def _get_http_proxy_config(file_path): if not os.path.isfile(file_path): raise CLIError("{} is not valid file, or not accessable.".format(file_path)) hp_config = get_file_json(file_path) if not isinstance(hp_config, dict): raise CLIError( "Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path)) config_object = ManagedClusterHTTPProxyConfig() config_object.http_proxy = hp_config.get("httpProxy", None) config_object.https_proxy = hp_config.get("httpsProxy", None) config_object.no_proxy = hp_config.get("noProxy", None) config_object.trusted_ca = hp_config.get("trustedCa", None) return config_object def _is_pod_identity_addon_enabled(instance): if not instance: return False if not instance.pod_identity_profile: return False return bool(instance.pod_identity_profile.enabled) def _ensure_pod_identity_addon_is_enabled(instance): if not _is_pod_identity_addon_enabled(instance): raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n' 'To enable, run "az aks update --enable-pod-identity') def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent): if not network_profile or not network_profile.network_plugin: # invalid data return if network_profile.network_plugin.lower() != 
'kubenet': # not kubenet, no need to check return if customer_consent is None: # no set this time, read from previous value customer_consent = bool( pod_identity_profile.allow_network_plugin_kubenet) if not customer_consent: raise CLIError( '--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin') pod_identity_profile.allow_network_plugin_kubenet = True def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None): if not enable: # when disable, remove previous saved value instance.pod_identity_profile = ManagedClusterPodIdentityProfile( enabled=False) return if not instance.pod_identity_profile: # not set before instance.pod_identity_profile = ManagedClusterPodIdentityProfile( enabled=enable, user_assigned_identities=pod_identities, user_assigned_identity_exceptions=pod_identity_exceptions, ) _ensure_pod_identity_kubenet_consent( instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent) instance.pod_identity_profile.enabled = enable instance.pod_identity_profile.user_assigned_identities = pod_identities or [] instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or [] def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope): cluster_identity_object_id = None if instance.identity.type.lower() == 'userassigned': for identity in instance.identity.user_assigned_identities.values(): cluster_identity_object_id = identity.principal_id break elif instance.identity.type.lower() == 'systemassigned': cluster_identity_object_id = instance.identity.principal_id else: raise CLIError('unsupported identity type: {}'.format( instance.identity.type)) if cluster_identity_object_id is None: raise CLIError('unable to resolve cluster identity') factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in 
assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope.lower() != scope.lower(): continue if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID): continue if i.principal_id.lower() != cluster_identity_object_id.lower(): continue # already assigned return if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id, is_service_principal=False, scope=scope): raise CLIError( 'Could not grant Managed Identity Operator permission for cluster') # need more time to propogate this assignment... print() print('Wait 30 seconds for identity role assignment propagation.') time.sleep(30) def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name, identity_name, identity_namespace, identity_resource_id, binding_selector=None, no_wait=False): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) _ensure_pod_identity_addon_is_enabled(instance) user_assigned_identity = _get_user_assigned_identity( cmd.cli_ctx, identity_resource_id) _ensure_managed_identity_operator_permission( cmd.cli_ctx, instance, user_assigned_identity.id) pod_identities = [] if instance.pod_identity_profile.user_assigned_identities: pod_identities = instance.pod_identity_profile.user_assigned_identities pod_identity = ManagedClusterPodIdentity( name=identity_name, namespace=identity_namespace, identity=UserAssignedIdentity( resource_id=user_assigned_identity.id, client_id=user_assigned_identity.client_id, object_id=user_assigned_identity.principal_id, ) ) if binding_selector is not None: pod_identity.binding_selector = binding_selector pod_identities.append(pod_identity) _update_addon_pod_identity( instance, enable=True, pod_identities=pod_identities, pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions, ) # send the managed cluster represeentation to update the pod identity addon return sdk_no_wait(no_wait, 
client.begin_create_or_update, resource_group_name, cluster_name, instance) def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name, identity_name, identity_namespace, no_wait=False): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) _ensure_pod_identity_addon_is_enabled(instance) pod_identities = [] if instance.pod_identity_profile.user_assigned_identities: for pod_identity in instance.pod_identity_profile.user_assigned_identities: if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace: # to remove continue pod_identities.append(pod_identity) _update_addon_pod_identity( instance, enable=True, pod_identities=pod_identities, pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions, ) # send the managed cluster represeentation to update the pod identity addon return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance) def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) return _remove_nulls([instance])[0] def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name, exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) _ensure_pod_identity_addon_is_enabled(instance) pod_identity_exceptions = [] if instance.pod_identity_profile.user_assigned_identity_exceptions: pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions exc = ManagedClusterPodIdentityException( name=exc_name, namespace=exc_namespace, pod_labels=pod_labels) pod_identity_exceptions.append(exc) _update_addon_pod_identity( instance, enable=True, pod_identities=instance.pod_identity_profile.user_assigned_identities, pod_identity_exceptions=pod_identity_exceptions, ) # send the managed cluster 
represeentation to update the pod identity addon return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance) def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name, exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) _ensure_pod_identity_addon_is_enabled(instance) pod_identity_exceptions = [] if instance.pod_identity_profile.user_assigned_identity_exceptions: for exc in instance.pod_identity_profile.user_assigned_identity_exceptions: if exc.name == exc_name and exc.namespace == exc_namespace: # to remove continue pod_identity_exceptions.append(exc) _update_addon_pod_identity( instance, enable=True, pod_identities=instance.pod_identity_profile.user_assigned_identities, pod_identity_exceptions=pod_identity_exceptions, ) # send the managed cluster represeentation to update the pod identity addon return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance) def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name, exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument instance = client.get(resource_group_name, cluster_name) _ensure_pod_identity_addon_is_enabled(instance) found_target = False updated_exc = ManagedClusterPodIdentityException( name=exc_name, namespace=exc_namespace, pod_labels=pod_labels) pod_identity_exceptions = [] if instance.pod_identity_profile.user_assigned_identity_exceptions: for exc in instance.pod_identity_profile.user_assigned_identity_exceptions: if exc.name == exc_name and exc.namespace == exc_namespace: found_target = True pod_identity_exceptions.append(updated_exc) else: pod_identity_exceptions.append(exc) if not found_target: raise CLIError( 'pod identity exception {}/{} not found'.format(exc_namespace, exc_name)) _update_addon_pod_identity( instance, enable=True, 
pod_identities=instance.pod_identity_profile.user_assigned_identities, pod_identity_exceptions=pod_identity_exceptions, ) # send the managed cluster represeentation to update the pod identity addon return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance) def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name): instance = client.get(resource_group_name, cluster_name) return _remove_nulls([instance])[0] def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope): factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope.lower() != scope.lower(): continue if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID): continue if i.principal_id.lower() != cluster_identity_object_id.lower(): continue # already assigned return if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id, is_service_principal=False, scope=scope): raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope)) def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
TScanAPIDemo_pyqt5.py
from ctypes import *
from PyQt5.QtWidgets import *
from TSMsater_32 import TScanAPI
import sys
import threading
from threading import Lock, Thread

# Module-level state shared with the TSMaster C API.
canfd = TScanAPI.TLIBCANFD()  # scratch CAN-FD frame (unused below; kept for backward compatibility)
obj1 = c_size_t(0)            # device handle, filled in by tsapp_connect()


def OnPreRxCANEvent(ACAN):
    """Callback fired by TSMaster for each CAN frame; ACAN is a pointer to TLIBCAN."""
    print("回调事件发送接受can.FIdentifier = ", ACAN.contents.FIdentifier)


# Keep a module-level reference to the ctypes callback wrapper so it is not
# garbage-collected while the C side still holds the function pointer.
OnRxCANEvent = TScanAPI.OnTx_RxFUNC_CAN(OnPreRxCANEvent)

size = c_int(16)  # number of frames fetched per tsapp_receive_* call


class FirstDemo(QWidget):
    """Minimal four-button window demonstrating the TSMaster CAN/CAN-FD API."""

    def __init__(self):
        super(FirstDemo, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the window: one button per API action."""
        self.resize(300, 200)
        self.setWindowTitle("TSMasterAPIDemo")
        layout = QVBoxLayout()

        self.btn_ConnectAPI = QPushButton("连接硬件")
        self.btn_ConnectAPI.clicked.connect(self.ConnectAPI)
        self.btn_SendMessage = QPushButton("发送报文")
        self.btn_SendMessage.clicked.connect(self.SendMessage)
        self.btn_DisConnectAPI = QPushButton("断开连接")
        self.btn_DisConnectAPI.clicked.connect(self.DisConnectAPI)
        self.btn_event_Rxcan = QPushButton("Rx事件")
        self.btn_event_Rxcan.clicked.connect(self.OnCANRxEvent)

        for button in (self.btn_ConnectAPI, self.btn_SendMessage,
                       self.btn_DisConnectAPI, self.btn_event_Rxcan):
            layout.addWidget(button)
        self.setLayout(layout)

    def ConnectAPI(self):
        """Initialise the library, connect, register the Rx callback, set CAN-FD baudrates.

        NOTE(review): only the return code of the last API call is checked;
        earlier failures (connect/register) are silently ignored — confirm
        whether each step should be validated.
        """
        TScanAPI.initialize_lib_tsmaster(True, False)
        connectAPI = TScanAPI.tsapp_connect('', obj1)
        connectAPI = TScanAPI.tsapp_register_event_can(obj1, OnRxCANEvent)
        # Classic-CAN baudrate (alternative to the CAN-FD configuration below):
        # connectAPI = TScanAPI.tsapp_configure_baudrate_can(obj1, 0, c_double(500), 1)
        # connectAPI = TScanAPI.tsapp_configure_baudrate_can(obj1, 1, c_double(500), 1)
        # CAN-FD baudrate: 500 kbit/s arbitration phase, 2000 kbit/s data phase,
        # ISO CAN-FD controller, normal mode, A120 termination enabled — on both channels.
        for channel in (0, 1):
            connectAPI = TScanAPI.tsapp_configure_baudrate_canfd(
                obj1, channel, c_double(500), c_double(2000),
                TScanAPI.TLIBCANFDControllerType.lfdtISOCAN.value,
                TScanAPI.TLIBCANFDControllerMode.lfdmNormal.value,
                TScanAPI.A120.ENABLEA120.value)
        print(connectAPI)
        if connectAPI == 0:
            print("连接成功")
            # Asynchronous multi-threaded receive (alternative to the polling in
            # OnCANRxEvent): run TScanAPI.tsapp_receive_can_msgs in a loop on a
            # worker thread, e.g.:
            # t_sing = threading.Thread(target=TScanAPI.tsapp_receive_can_msgs,
            #                           args=(obj1, message1, size, 0, 1))
            # t_sing.start()
        else:
            print('连接失败')

    def SendMessage(self):
        """Queue one classic-CAN frame (0x100) and one CAN-FD frame (0x101) on channel 0."""
        msg = TScanAPI.TLIBCAN()
        msg.FIdxChn = 0
        msg.FIdentifier = 0x100
        msg.FProperties = 5
        msg.FDLC = 8
        for i, byte in enumerate([0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17]):
            msg.FData[i] = byte
        ret = TScanAPI.tsapp_transmit_can_async(obj1, msg)
        # print(ret)

        msg1 = TScanAPI.TLIBCANFD()
        msg1.FIdxChn = 0
        msg1.FIdentifier = 0x101
        msg1.FProperties = 5
        msg1.FFDProperties = 1
        # NOTE(review): FDLC is set to 11 while 11 payload bytes are written; in
        # CAN-FD a DLC of 11 encodes a 20-byte payload — confirm intended length.
        msg1.FDLC = 11
        for i, byte in enumerate([0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
                                  0x16, 0x17, 0x18, 0x19, 0x20]):
            msg1.FData[i] = byte
        ret = TScanAPI.tsapp_transmit_canfd_async(obj1, msg1)
        # print(ret)

    def OnCANRxEvent(self):
        """Poll up to 16 CAN and 16 CAN-FD frames from channel 1 and print their IDs."""
        # Renamed from 'list'/'list1': do not shadow the builtin.
        rx_can = [TScanAPI.TLIBCAN() for _ in range(16)]
        rx_canfd = [TScanAPI.TLIBCANFD() for _ in range(16)]
        chn = c_ubyte(1)
        txrx = c_ubyte(1)

        ret = TScanAPI.tsapp_receive_can_msgs(obj1, rx_can, size, chn, txrx)
        for frame in rx_can:
            if frame.FIdentifier != 0x00:
                print("{:#x}".format(frame.FIdentifier))

        ret = TScanAPI.tsapp_receive_canfd_msgs(obj1, rx_canfd, size, chn, txrx)
        for frame in rx_canfd:
            if frame.FIdentifier != 0x00:
                print("{:#x}".format(frame.FIdentifier))

    def DisConnectAPI(self):
        # TODO: not implemented in the original demo; a tsapp_disconnect() /
        # library-finalize call would belong here.
        pass


if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = FirstDemo()
    main.show()
    sys.exit(app.exec_())
A3C.py
"""A3C (Asynchronous Advantage Actor-Critic) training for a robot-arm environment.

Environment is a Robot Arm. The arm tries to get to the blue point.
The environment will return a geographic (distance) information for the arm
to learn. The farther away from the blue point the less reward; touch blue
r += 1; stop at blue for a while then get r = +10.

You can customize this script in a way you want.

View more on [莫烦Python]: https://morvanzhou.github.io/2_tensorflow_old/

Requirement:
pyglet >= 1.2.4
numpy >= 1.12.1
tensorflow >= 1.0.1
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
from arm_env import ArmEnv

# np.random.seed(1)
# tf.set_random_seed(1)

MAX_GLOBAL_EP = 2000          # stop training after this many episodes (all workers combined)
MAX_EP_STEP = 300             # steps per episode
UPDATE_GLOBAL_ITER = 5        # push local gradients to the global net every N steps
N_WORKERS = multiprocessing.cpu_count()  # one worker thread per CPU core
LR_A = 1e-4    # learning rate for actor
LR_C = 2e-4    # learning rate for critic
GAMMA = 0.9    # reward discount
MODE = ['easy', 'hard']
n_model = 1    # index into MODE
GLOBAL_NET_SCOPE = 'Global_Net'
ENTROPY_BETA = 0.01           # entropy bonus weight (encourages exploration)
GLOBAL_RUNNING_R = []         # running (smoothed) episode rewards, shared by workers
GLOBAL_EP = 0                 # global episode counter, shared by workers

# Probe the environment once just to read its dimensions, then discard it.
env = ArmEnv(mode=MODE[n_model])
N_S = env.state_dim
N_A = env.action_dim
A_BOUND = env.action_bound
del env


class ACNet(object):
    """Actor-critic network.

    With scope == GLOBAL_NET_SCOPE it builds only the shared parameters; any
    other scope builds a full local net with losses, gradients, and ops to
    push gradients to / pull parameters from the global net ``globalAC``.
    Relies on module-level SESS, OPT_A and OPT_C created in ``__main__``.
    """

    def __init__(self, scope, globalAC=None):

        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net()
                # Collections are looked up by scope-name prefix, so the
                # 'actor'/'critic' variable_scope names below are load-bearing.
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v = self._build_net()

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    # Critic: mean squared TD error.
                    self.c_loss = tf.reduce_mean(tf.square(td))

                with tf.name_scope('wrap_a_out'):
                    self.test = sigma[0]  # exposed for logging (policy std-dev)
                    # Scale mean to the action range; floor sigma for numerical stability.
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-5

                normal_dist = tf.contrib.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    # Policy-gradient objective; td acts as the advantage
                    # estimate (gradient does not flow into the critic here).
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)

                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)

                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    # Copy global parameters into this local net.
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    # Apply LOCAL gradients to the GLOBAL parameters.
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self):
        """Build actor (mu, sigma heads) and critic (state-value head); return (mu, sigma, v)."""
        w_init = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init, name='la')
            l_a = tf.layers.dense(l_a, 300, tf.nn.relu6, kernel_initializer=w_init, name='la2')
            mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            l_c = tf.layers.dense(l_c, 200, tf.nn.relu6, kernel_initializer=w_init, name='lc2')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        """Apply local gradients to the global net; return current sigma[0] for logging."""
        _, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict)  # local grads applies to global net
        return t

    def pull_global(self):  # run by a local
        """Overwrite local parameters with the global net's parameters."""
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local
        """Sample one action for state ``s`` from the current (clipped) policy."""
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})[0]


class Worker(object):
    """One A3C worker: owns its own environment and local ACNet copy."""

    def __init__(self, name, globalAC):
        self.env = ArmEnv(mode=MODE[n_model])
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        """Run episodes until the coordinator stops or MAX_GLOBAL_EP is reached."""
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    # Only the first worker renders, to keep the others fast.
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done = self.env.step(a)
                if ep_t == MAX_EP_STEP - 1:
                    done = True

                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:   # update global and assign to local net
                    if done:
                        v_s_ = 0   # terminal
                    else:
                        # Bootstrap from the critic's value of the next state.
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    buffer_v_target = []
                    for r in buffer_r[::-1]:    # reverse buffer r
                        # Discounted n-step return, computed backwards.
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                    }
                    test = self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        # Exponential moving average of episode reward.
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                        '| Var:', test,
                    )
                    GLOBAL_EP += 1
                    break


if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create worker
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    worker_threads = []
    for worker in workers:
        # NOTE(review): this lambda late-binds `worker`; because each thread is
        # started immediately it usually captures the intended worker, but
        # `lambda w=worker: w.work()` would be the safe form — cannot change in
        # this documentation-only pass.
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)
__init__.py
# -*- coding: utf-8 -*- """Miscellaneous helper functions (not wiki-dependent).""" # # (C) Pywikibot team, 2008-2019 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, division, unicode_literals import collections import gzip import hashlib from importlib import import_module import inspect import itertools import os import re import stat import subprocess import sys import threading import time import types try: from collections.abc import Iterator, Mapping except ImportError: # Python 2.7 from collections import Iterator, Mapping from datetime import datetime from distutils.version import Version from functools import wraps from warnings import catch_warnings, showwarning, warn from pywikibot.logging import debug PYTHON_VERSION = sys.version_info[:3] PY2 = (PYTHON_VERSION[0] == 2) if not PY2: from itertools import zip_longest import queue StringTypes = (str, bytes) UnicodeType = str else: from itertools import izip_longest as zip_longest import Queue as queue # noqa: N813 StringTypes = types.StringTypes UnicodeType = types.UnicodeType try: import bz2 except ImportError as bz2_import_error: try: import bz2file as bz2 warn('package bz2 was not found; using bz2file', ImportWarning) except ImportError: warn('package bz2 and bz2file were not found', ImportWarning) bz2 = bz2_import_error try: import lzma except ImportError as lzma_import_error: lzma = lzma_import_error if PYTHON_VERSION < (3, 5): # although deprecated in 3 completely no message was emitted until 3.5 ArgSpec = inspect.ArgSpec getargspec = inspect.getargspec else: ArgSpec = collections.namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults']) def getargspec(func): """Python 3 implementation using inspect.signature.""" sig = inspect.signature(func) args = [] defaults = [] varargs = None kwargs = None for p in sig.parameters.values(): if p.kind == inspect.Parameter.VAR_POSITIONAL: varargs = p.name elif p.kind == inspect.Parameter.VAR_KEYWORD: kwargs = 
p.name else: args += [p.name] if p.default != inspect.Parameter.empty: defaults += [p.default] if defaults: defaults = tuple(defaults) else: defaults = None return ArgSpec(args, varargs, kwargs, defaults) _logger = 'tools' # A mapping of characters to their MediaWiki title-cased forms. Python, # depending on version, handles these characters differently, which causes # errors in normalizing titles. (T200357) This dict was created using # Python 3.7 (Unicode version 11.0.0) and should be updated at least with every # new release of Python with an updated unicodedata.unidata_version. _first_upper_exception = { '\xdf': '\xdf', '\u0149': '\u0149', '\u0180': '\u0180', '\u019a': '\u019a', '\u01c5': '\u01c5', '\u01c6': '\u01c5', '\u01c8': '\u01c8', '\u01c9': '\u01c8', '\u01cb': '\u01cb', '\u01cc': '\u01cb', '\u01f0': '\u01f0', '\u01f2': '\u01f2', '\u01f3': '\u01f2', '\u023c': '\u023c', '\u023f': '\u023f', '\u0240': '\u0240', '\u0242': '\u0242', '\u0247': '\u0247', '\u0249': '\u0249', '\u024b': '\u024b', '\u024d': '\u024d', '\u024f': '\u024f', '\u0250': '\u0250', '\u0251': '\u0251', '\u0252': '\u0252', '\u025c': '\u025c', '\u0261': '\u0261', '\u0265': '\u0265', '\u0266': '\u0266', '\u026a': '\u026a', '\u026b': '\u026b', '\u026c': '\u026c', '\u0271': '\u0271', '\u027d': '\u027d', '\u0287': '\u0287', '\u0289': '\u0289', '\u028c': '\u028c', '\u029d': '\u029d', '\u029e': '\u029e', '\u0345': '\u0345', '\u0371': '\u0371', '\u0373': '\u0373', '\u0377': '\u0377', '\u037b': '\u037b', '\u037c': '\u037c', '\u037d': '\u037d', '\u0390': '\u0390', '\u03b0': '\u03b0', '\u03d7': '\u03d7', '\u03f2': '\u03a3', '\u03f3': '\u03f3', '\u03f8': '\u03f8', '\u03fb': '\u03fb', '\u04cf': '\u04cf', '\u04f7': '\u04f7', '\u04fb': '\u04fb', '\u04fd': '\u04fd', '\u04ff': '\u04ff', '\u0511': '\u0511', '\u0513': '\u0513', '\u0515': '\u0515', '\u0517': '\u0517', '\u0519': '\u0519', '\u051b': '\u051b', '\u051d': '\u051d', '\u051f': '\u051f', '\u0521': '\u0521', '\u0523': '\u0523', '\u0525': '\u0525', 
'\u0527': '\u0527', '\u0529': '\u0529', '\u052b': '\u052b', '\u052d': '\u052d', '\u052f': '\u052f', '\u0587': '\u0587', '\u10d0': '\u10d0', '\u10d1': '\u10d1', '\u10d2': '\u10d2', '\u10d3': '\u10d3', '\u10d4': '\u10d4', '\u10d5': '\u10d5', '\u10d6': '\u10d6', '\u10d7': '\u10d7', '\u10d8': '\u10d8', '\u10d9': '\u10d9', '\u10da': '\u10da', '\u10db': '\u10db', '\u10dc': '\u10dc', '\u10dd': '\u10dd', '\u10de': '\u10de', '\u10df': '\u10df', '\u10e0': '\u10e0', '\u10e1': '\u10e1', '\u10e2': '\u10e2', '\u10e3': '\u10e3', '\u10e4': '\u10e4', '\u10e5': '\u10e5', '\u10e6': '\u10e6', '\u10e7': '\u10e7', '\u10e8': '\u10e8', '\u10e9': '\u10e9', '\u10ea': '\u10ea', '\u10eb': '\u10eb', '\u10ec': '\u10ec', '\u10ed': '\u10ed', '\u10ee': '\u10ee', '\u10ef': '\u10ef', '\u10f0': '\u10f0', '\u10f1': '\u10f1', '\u10f2': '\u10f2', '\u10f3': '\u10f3', '\u10f4': '\u10f4', '\u10f5': '\u10f5', '\u10f6': '\u10f6', '\u10f7': '\u10f7', '\u10f8': '\u10f8', '\u10f9': '\u10f9', '\u10fa': '\u10fa', '\u10fd': '\u10fd', '\u10fe': '\u10fe', '\u10ff': '\u10ff', '\u13f8': '\u13f8', '\u13f9': '\u13f9', '\u13fa': '\u13fa', '\u13fb': '\u13fb', '\u13fc': '\u13fc', '\u13fd': '\u13fd', '\u1c80': '\u1c80', '\u1c81': '\u1c81', '\u1c82': '\u1c82', '\u1c83': '\u1c83', '\u1c84': '\u1c84', '\u1c85': '\u1c85', '\u1c86': '\u1c86', '\u1c87': '\u1c87', '\u1c88': '\u1c88', '\u1d79': '\u1d79', '\u1d7d': '\u1d7d', '\u1e96': '\u1e96', '\u1e97': '\u1e97', '\u1e98': '\u1e98', '\u1e99': '\u1e99', '\u1e9a': '\u1e9a', '\u1efb': '\u1efb', '\u1efd': '\u1efd', '\u1eff': '\u1eff', '\u1f50': '\u1f50', '\u1f52': '\u1f52', '\u1f54': '\u1f54', '\u1f56': '\u1f56', '\u1f71': '\u0386', '\u1f73': '\u0388', '\u1f75': '\u0389', '\u1f77': '\u038a', '\u1f79': '\u038c', '\u1f7b': '\u038e', '\u1f7d': '\u038f', '\u1f80': '\u1f88', '\u1f81': '\u1f89', '\u1f82': '\u1f8a', '\u1f83': '\u1f8b', '\u1f84': '\u1f8c', '\u1f85': '\u1f8d', '\u1f86': '\u1f8e', '\u1f87': '\u1f8f', '\u1f88': '\u1f88', '\u1f89': '\u1f89', '\u1f8a': '\u1f8a', '\u1f8b': '\u1f8b', 
'\u1f8c': '\u1f8c', '\u1f8d': '\u1f8d', '\u1f8e': '\u1f8e', '\u1f8f': '\u1f8f', '\u1f90': '\u1f98', '\u1f91': '\u1f99', '\u1f92': '\u1f9a', '\u1f93': '\u1f9b', '\u1f94': '\u1f9c', '\u1f95': '\u1f9d', '\u1f96': '\u1f9e', '\u1f97': '\u1f9f', '\u1f98': '\u1f98', '\u1f99': '\u1f99', '\u1f9a': '\u1f9a', '\u1f9b': '\u1f9b', '\u1f9c': '\u1f9c', '\u1f9d': '\u1f9d', '\u1f9e': '\u1f9e', '\u1f9f': '\u1f9f', '\u1fa0': '\u1fa8', '\u1fa1': '\u1fa9', '\u1fa2': '\u1faa', '\u1fa3': '\u1fab', '\u1fa4': '\u1fac', '\u1fa5': '\u1fad', '\u1fa6': '\u1fae', '\u1fa7': '\u1faf', '\u1fa8': '\u1fa8', '\u1fa9': '\u1fa9', '\u1faa': '\u1faa', '\u1fab': '\u1fab', '\u1fac': '\u1fac', '\u1fad': '\u1fad', '\u1fae': '\u1fae', '\u1faf': '\u1faf', '\u1fb2': '\u1fb2', '\u1fb3': '\u1fbc', '\u1fb4': '\u1fb4', '\u1fb6': '\u1fb6', '\u1fb7': '\u1fb7', '\u1fbc': '\u1fbc', '\u1fc2': '\u1fc2', '\u1fc3': '\u1fcc', '\u1fc4': '\u1fc4', '\u1fc6': '\u1fc6', '\u1fc7': '\u1fc7', '\u1fcc': '\u1fcc', '\u1fd2': '\u1fd2', '\u1fd3': '\u0390', '\u1fd6': '\u1fd6', '\u1fd7': '\u1fd7', '\u1fe2': '\u1fe2', '\u1fe3': '\u03b0', '\u1fe4': '\u1fe4', '\u1fe6': '\u1fe6', '\u1fe7': '\u1fe7', '\u1ff2': '\u1ff2', '\u1ff3': '\u1ffc', '\u1ff4': '\u1ff4', '\u1ff6': '\u1ff6', '\u1ff7': '\u1ff7', '\u1ffc': '\u1ffc', '\u214e': '\u214e', '\u2170': '\u2170', '\u2171': '\u2171', '\u2172': '\u2172', '\u2173': '\u2173', '\u2174': '\u2174', '\u2175': '\u2175', '\u2176': '\u2176', '\u2177': '\u2177', '\u2178': '\u2178', '\u2179': '\u2179', '\u217a': '\u217a', '\u217b': '\u217b', '\u217c': '\u217c', '\u217d': '\u217d', '\u217e': '\u217e', '\u217f': '\u217f', '\u2184': '\u2184', '\u24d0': '\u24d0', '\u24d1': '\u24d1', '\u24d2': '\u24d2', '\u24d3': '\u24d3', '\u24d4': '\u24d4', '\u24d5': '\u24d5', '\u24d6': '\u24d6', '\u24d7': '\u24d7', '\u24d8': '\u24d8', '\u24d9': '\u24d9', '\u24da': '\u24da', '\u24db': '\u24db', '\u24dc': '\u24dc', '\u24dd': '\u24dd', '\u24de': '\u24de', '\u24df': '\u24df', '\u24e0': '\u24e0', '\u24e1': '\u24e1', '\u24e2': '\u24e2', 
'\u24e3': '\u24e3', '\u24e4': '\u24e4', '\u24e5': '\u24e5', '\u24e6': '\u24e6', '\u24e7': '\u24e7', '\u24e8': '\u24e8', '\u24e9': '\u24e9', '\u2c30': '\u2c30', '\u2c31': '\u2c31', '\u2c32': '\u2c32', '\u2c33': '\u2c33', '\u2c34': '\u2c34', '\u2c35': '\u2c35', '\u2c36': '\u2c36', '\u2c37': '\u2c37', '\u2c38': '\u2c38', '\u2c39': '\u2c39', '\u2c3a': '\u2c3a', '\u2c3b': '\u2c3b', '\u2c3c': '\u2c3c', '\u2c3d': '\u2c3d', '\u2c3e': '\u2c3e', '\u2c3f': '\u2c3f', '\u2c40': '\u2c40', '\u2c41': '\u2c41', '\u2c42': '\u2c42', '\u2c43': '\u2c43', '\u2c44': '\u2c44', '\u2c45': '\u2c45', '\u2c46': '\u2c46', '\u2c47': '\u2c47', '\u2c48': '\u2c48', '\u2c49': '\u2c49', '\u2c4a': '\u2c4a', '\u2c4b': '\u2c4b', '\u2c4c': '\u2c4c', '\u2c4d': '\u2c4d', '\u2c4e': '\u2c4e', '\u2c4f': '\u2c4f', '\u2c50': '\u2c50', '\u2c51': '\u2c51', '\u2c52': '\u2c52', '\u2c53': '\u2c53', '\u2c54': '\u2c54', '\u2c55': '\u2c55', '\u2c56': '\u2c56', '\u2c57': '\u2c57', '\u2c58': '\u2c58', '\u2c59': '\u2c59', '\u2c5a': '\u2c5a', '\u2c5b': '\u2c5b', '\u2c5c': '\u2c5c', '\u2c5d': '\u2c5d', '\u2c5e': '\u2c5e', '\u2c61': '\u2c61', '\u2c65': '\u2c65', '\u2c66': '\u2c66', '\u2c68': '\u2c68', '\u2c6a': '\u2c6a', '\u2c6c': '\u2c6c', '\u2c73': '\u2c73', '\u2c76': '\u2c76', '\u2c81': '\u2c81', '\u2c83': '\u2c83', '\u2c85': '\u2c85', '\u2c87': '\u2c87', '\u2c89': '\u2c89', '\u2c8b': '\u2c8b', '\u2c8d': '\u2c8d', '\u2c8f': '\u2c8f', '\u2c91': '\u2c91', '\u2c93': '\u2c93', '\u2c95': '\u2c95', '\u2c97': '\u2c97', '\u2c99': '\u2c99', '\u2c9b': '\u2c9b', '\u2c9d': '\u2c9d', '\u2c9f': '\u2c9f', '\u2ca1': '\u2ca1', '\u2ca3': '\u2ca3', '\u2ca5': '\u2ca5', '\u2ca7': '\u2ca7', '\u2ca9': '\u2ca9', '\u2cab': '\u2cab', '\u2cad': '\u2cad', '\u2caf': '\u2caf', '\u2cb1': '\u2cb1', '\u2cb3': '\u2cb3', '\u2cb5': '\u2cb5', '\u2cb7': '\u2cb7', '\u2cb9': '\u2cb9', '\u2cbb': '\u2cbb', '\u2cbd': '\u2cbd', '\u2cbf': '\u2cbf', '\u2cc1': '\u2cc1', '\u2cc3': '\u2cc3', '\u2cc5': '\u2cc5', '\u2cc7': '\u2cc7', '\u2cc9': '\u2cc9', '\u2ccb': '\u2ccb', 
'\u2ccd': '\u2ccd', '\u2ccf': '\u2ccf', '\u2cd1': '\u2cd1', '\u2cd3': '\u2cd3', '\u2cd5': '\u2cd5', '\u2cd7': '\u2cd7', '\u2cd9': '\u2cd9', '\u2cdb': '\u2cdb', '\u2cdd': '\u2cdd', '\u2cdf': '\u2cdf', '\u2ce1': '\u2ce1', '\u2ce3': '\u2ce3', '\u2cec': '\u2cec', '\u2cee': '\u2cee', '\u2cf3': '\u2cf3', '\u2d00': '\u2d00', '\u2d01': '\u2d01', '\u2d02': '\u2d02', '\u2d03': '\u2d03', '\u2d04': '\u2d04', '\u2d05': '\u2d05', '\u2d06': '\u2d06', '\u2d07': '\u2d07', '\u2d08': '\u2d08', '\u2d09': '\u2d09', '\u2d0a': '\u2d0a', '\u2d0b': '\u2d0b', '\u2d0c': '\u2d0c', '\u2d0d': '\u2d0d', '\u2d0e': '\u2d0e', '\u2d0f': '\u2d0f', '\u2d10': '\u2d10', '\u2d11': '\u2d11', '\u2d12': '\u2d12', '\u2d13': '\u2d13', '\u2d14': '\u2d14', '\u2d15': '\u2d15', '\u2d16': '\u2d16', '\u2d17': '\u2d17', '\u2d18': '\u2d18', '\u2d19': '\u2d19', '\u2d1a': '\u2d1a', '\u2d1b': '\u2d1b', '\u2d1c': '\u2d1c', '\u2d1d': '\u2d1d', '\u2d1e': '\u2d1e', '\u2d1f': '\u2d1f', '\u2d20': '\u2d20', '\u2d21': '\u2d21', '\u2d22': '\u2d22', '\u2d23': '\u2d23', '\u2d24': '\u2d24', '\u2d25': '\u2d25', '\u2d27': '\u2d27', '\u2d2d': '\u2d2d', '\ua641': '\ua641', '\ua643': '\ua643', '\ua645': '\ua645', '\ua647': '\ua647', '\ua649': '\ua649', '\ua64b': '\ua64b', '\ua64d': '\ua64d', '\ua64f': '\ua64f', '\ua651': '\ua651', '\ua653': '\ua653', '\ua655': '\ua655', '\ua657': '\ua657', '\ua659': '\ua659', '\ua65b': '\ua65b', '\ua65d': '\ua65d', '\ua65f': '\ua65f', '\ua661': '\ua661', '\ua663': '\ua663', '\ua665': '\ua665', '\ua667': '\ua667', '\ua669': '\ua669', '\ua66b': '\ua66b', '\ua66d': '\ua66d', '\ua681': '\ua681', '\ua683': '\ua683', '\ua685': '\ua685', '\ua687': '\ua687', '\ua689': '\ua689', '\ua68b': '\ua68b', '\ua68d': '\ua68d', '\ua68f': '\ua68f', '\ua691': '\ua691', '\ua693': '\ua693', '\ua695': '\ua695', '\ua697': '\ua697', '\ua699': '\ua699', '\ua69b': '\ua69b', '\ua723': '\ua723', '\ua725': '\ua725', '\ua727': '\ua727', '\ua729': '\ua729', '\ua72b': '\ua72b', '\ua72d': '\ua72d', '\ua72f': '\ua72f', '\ua733': '\ua733', 
'\ua735': '\ua735', '\ua737': '\ua737', '\ua739': '\ua739', '\ua73b': '\ua73b', '\ua73d': '\ua73d', '\ua73f': '\ua73f', '\ua741': '\ua741', '\ua743': '\ua743', '\ua745': '\ua745', '\ua747': '\ua747', '\ua749': '\ua749', '\ua74b': '\ua74b', '\ua74d': '\ua74d', '\ua74f': '\ua74f', '\ua751': '\ua751', '\ua753': '\ua753', '\ua755': '\ua755', '\ua757': '\ua757', '\ua759': '\ua759', '\ua75b': '\ua75b', '\ua75d': '\ua75d', '\ua75f': '\ua75f', '\ua761': '\ua761', '\ua763': '\ua763', '\ua765': '\ua765', '\ua767': '\ua767', '\ua769': '\ua769', '\ua76b': '\ua76b', '\ua76d': '\ua76d', '\ua76f': '\ua76f', '\ua77a': '\ua77a', '\ua77c': '\ua77c', '\ua77f': '\ua77f', '\ua781': '\ua781', '\ua783': '\ua783', '\ua785': '\ua785', '\ua787': '\ua787', '\ua78c': '\ua78c', '\ua791': '\ua791', '\ua793': '\ua793', '\ua797': '\ua797', '\ua799': '\ua799', '\ua79b': '\ua79b', '\ua79d': '\ua79d', '\ua79f': '\ua79f', '\ua7a1': '\ua7a1', '\ua7a3': '\ua7a3', '\ua7a5': '\ua7a5', '\ua7a7': '\ua7a7', '\ua7a9': '\ua7a9', '\ua7b5': '\ua7b5', '\ua7b7': '\ua7b7', '\ua7b9': '\ua7b9', '\uab53': '\uab53', '\uab70': '\uab70', '\uab71': '\uab71', '\uab72': '\uab72', '\uab73': '\uab73', '\uab74': '\uab74', '\uab75': '\uab75', '\uab76': '\uab76', '\uab77': '\uab77', '\uab78': '\uab78', '\uab79': '\uab79', '\uab7a': '\uab7a', '\uab7b': '\uab7b', '\uab7c': '\uab7c', '\uab7d': '\uab7d', '\uab7e': '\uab7e', '\uab7f': '\uab7f', '\uab80': '\uab80', '\uab81': '\uab81', '\uab82': '\uab82', '\uab83': '\uab83', '\uab84': '\uab84', '\uab85': '\uab85', '\uab86': '\uab86', '\uab87': '\uab87', '\uab88': '\uab88', '\uab89': '\uab89', '\uab8a': '\uab8a', '\uab8b': '\uab8b', '\uab8c': '\uab8c', '\uab8d': '\uab8d', '\uab8e': '\uab8e', '\uab8f': '\uab8f', '\uab90': '\uab90', '\uab91': '\uab91', '\uab92': '\uab92', '\uab93': '\uab93', '\uab94': '\uab94', '\uab95': '\uab95', '\uab96': '\uab96', '\uab97': '\uab97', '\uab98': '\uab98', '\uab99': '\uab99', '\uab9a': '\uab9a', '\uab9b': '\uab9b', '\uab9c': '\uab9c', '\uab9d': '\uab9d', 
'\uab9e': '\uab9e', '\uab9f': '\uab9f', '\uaba0': '\uaba0', '\uaba1': '\uaba1', '\uaba2': '\uaba2', '\uaba3': '\uaba3', '\uaba4': '\uaba4', '\uaba5': '\uaba5', '\uaba6': '\uaba6', '\uaba7': '\uaba7', '\uaba8': '\uaba8', '\uaba9': '\uaba9', '\uabaa': '\uabaa', '\uabab': '\uabab', '\uabac': '\uabac', '\uabad': '\uabad', '\uabae': '\uabae', '\uabaf': '\uabaf', '\uabb0': '\uabb0', '\uabb1': '\uabb1', '\uabb2': '\uabb2', '\uabb3': '\uabb3', '\uabb4': '\uabb4', '\uabb5': '\uabb5', '\uabb6': '\uabb6', '\uabb7': '\uabb7', '\uabb8': '\uabb8', '\uabb9': '\uabb9', '\uabba': '\uabba', '\uabbb': '\uabbb', '\uabbc': '\uabbc', '\uabbd': '\uabbd', '\uabbe': '\uabbe', '\uabbf': '\uabbf', '\ufb00': '\ufb00', '\ufb01': '\ufb01', '\ufb02': '\ufb02', '\ufb03': '\ufb03', '\ufb04': '\ufb04', '\ufb05': '\ufb05', '\ufb06': '\ufb06', '\ufb13': '\ufb13', '\ufb14': '\ufb14', '\ufb15': '\ufb15', '\ufb16': '\ufb16', '\ufb17': '\ufb17', '\U0001044e': '\U0001044e', '\U0001044f': '\U0001044f', '\U000104d8': '\U000104d8', '\U000104d9': '\U000104d9', '\U000104da': '\U000104da', '\U000104db': '\U000104db', '\U000104dc': '\U000104dc', '\U000104dd': '\U000104dd', '\U000104de': '\U000104de', '\U000104df': '\U000104df', '\U000104e0': '\U000104e0', '\U000104e1': '\U000104e1', '\U000104e2': '\U000104e2', '\U000104e3': '\U000104e3', '\U000104e4': '\U000104e4', '\U000104e5': '\U000104e5', '\U000104e6': '\U000104e6', '\U000104e7': '\U000104e7', '\U000104e8': '\U000104e8', '\U000104e9': '\U000104e9', '\U000104ea': '\U000104ea', '\U000104eb': '\U000104eb', '\U000104ec': '\U000104ec', '\U000104ed': '\U000104ed', '\U000104ee': '\U000104ee', '\U000104ef': '\U000104ef', '\U000104f0': '\U000104f0', '\U000104f1': '\U000104f1', '\U000104f2': '\U000104f2', '\U000104f3': '\U000104f3', '\U000104f4': '\U000104f4', '\U000104f5': '\U000104f5', '\U000104f6': '\U000104f6', '\U000104f7': '\U000104f7', '\U000104f8': '\U000104f8', '\U000104f9': '\U000104f9', '\U000104fa': '\U000104fa', '\U000104fb': '\U000104fb', '\U00010cc0': 
'\U00010cc0', '\U00010cc1': '\U00010cc1', '\U00010cc2': '\U00010cc2', '\U00010cc3': '\U00010cc3', '\U00010cc4': '\U00010cc4', '\U00010cc5': '\U00010cc5', '\U00010cc6': '\U00010cc6', '\U00010cc7': '\U00010cc7', '\U00010cc8': '\U00010cc8', '\U00010cc9': '\U00010cc9', '\U00010cca': '\U00010cca', '\U00010ccb': '\U00010ccb', '\U00010ccc': '\U00010ccc', '\U00010ccd': '\U00010ccd', '\U00010cce': '\U00010cce', '\U00010ccf': '\U00010ccf', '\U00010cd0': '\U00010cd0', '\U00010cd1': '\U00010cd1', '\U00010cd2': '\U00010cd2', '\U00010cd3': '\U00010cd3', '\U00010cd4': '\U00010cd4', '\U00010cd5': '\U00010cd5', '\U00010cd6': '\U00010cd6', '\U00010cd7': '\U00010cd7', '\U00010cd8': '\U00010cd8', '\U00010cd9': '\U00010cd9', '\U00010cda': '\U00010cda', '\U00010cdb': '\U00010cdb', '\U00010cdc': '\U00010cdc', '\U00010cdd': '\U00010cdd', '\U00010cde': '\U00010cde', '\U00010cdf': '\U00010cdf', '\U00010ce0': '\U00010ce0', '\U00010ce1': '\U00010ce1', '\U00010ce2': '\U00010ce2', '\U00010ce3': '\U00010ce3', '\U00010ce4': '\U00010ce4', '\U00010ce5': '\U00010ce5', '\U00010ce6': '\U00010ce6', '\U00010ce7': '\U00010ce7', '\U00010ce8': '\U00010ce8', '\U00010ce9': '\U00010ce9', '\U00010cea': '\U00010cea', '\U00010ceb': '\U00010ceb', '\U00010cec': '\U00010cec', '\U00010ced': '\U00010ced', '\U00010cee': '\U00010cee', '\U00010cef': '\U00010cef', '\U00010cf0': '\U00010cf0', '\U00010cf1': '\U00010cf1', '\U00010cf2': '\U00010cf2', '\U000118c0': '\U000118c0', '\U000118c1': '\U000118c1', '\U000118c2': '\U000118c2', '\U000118c3': '\U000118c3', '\U000118c4': '\U000118c4', '\U000118c5': '\U000118c5', '\U000118c6': '\U000118c6', '\U000118c7': '\U000118c7', '\U000118c8': '\U000118c8', '\U000118c9': '\U000118c9', '\U000118ca': '\U000118ca', '\U000118cb': '\U000118cb', '\U000118cc': '\U000118cc', '\U000118cd': '\U000118cd', '\U000118ce': '\U000118ce', '\U000118cf': '\U000118cf', '\U000118d0': '\U000118d0', '\U000118d1': '\U000118d1', '\U000118d2': '\U000118d2', '\U000118d3': '\U000118d3', '\U000118d4': 
'\U000118d4', '\U000118d5': '\U000118d5', '\U000118d6': '\U000118d6', '\U000118d7': '\U000118d7', '\U000118d8': '\U000118d8', '\U000118d9': '\U000118d9', '\U000118da': '\U000118da', '\U000118db': '\U000118db', '\U000118dc': '\U000118dc', '\U000118dd': '\U000118dd', '\U000118de': '\U000118de', '\U000118df': '\U000118df', '\U00016e60': '\U00016e60', '\U00016e61': '\U00016e61', '\U00016e62': '\U00016e62', '\U00016e63': '\U00016e63', '\U00016e64': '\U00016e64', '\U00016e65': '\U00016e65', '\U00016e66': '\U00016e66', '\U00016e67': '\U00016e67', '\U00016e68': '\U00016e68', '\U00016e69': '\U00016e69', '\U00016e6a': '\U00016e6a', '\U00016e6b': '\U00016e6b', '\U00016e6c': '\U00016e6c', '\U00016e6d': '\U00016e6d', '\U00016e6e': '\U00016e6e', '\U00016e6f': '\U00016e6f', '\U00016e70': '\U00016e70', '\U00016e71': '\U00016e71', '\U00016e72': '\U00016e72', '\U00016e73': '\U00016e73', '\U00016e74': '\U00016e74', '\U00016e75': '\U00016e75', '\U00016e76': '\U00016e76', '\U00016e77': '\U00016e77', '\U00016e78': '\U00016e78', '\U00016e79': '\U00016e79', '\U00016e7a': '\U00016e7a', '\U00016e7b': '\U00016e7b', '\U00016e7c': '\U00016e7c', '\U00016e7d': '\U00016e7d', '\U00016e7e': '\U00016e7e', '\U00016e7f': '\U00016e7f', '\U0001e922': '\U0001e922', '\U0001e923': '\U0001e923', '\U0001e924': '\U0001e924', '\U0001e925': '\U0001e925', '\U0001e926': '\U0001e926', '\U0001e927': '\U0001e927', '\U0001e928': '\U0001e928', '\U0001e929': '\U0001e929', '\U0001e92a': '\U0001e92a', '\U0001e92b': '\U0001e92b', '\U0001e92c': '\U0001e92c', '\U0001e92d': '\U0001e92d', '\U0001e92e': '\U0001e92e', '\U0001e92f': '\U0001e92f', '\U0001e930': '\U0001e930', '\U0001e931': '\U0001e931', '\U0001e932': '\U0001e932', '\U0001e933': '\U0001e933', '\U0001e934': '\U0001e934', '\U0001e935': '\U0001e935', '\U0001e936': '\U0001e936', '\U0001e937': '\U0001e937', '\U0001e938': '\U0001e938', '\U0001e939': '\U0001e939', '\U0001e93a': '\U0001e93a', '\U0001e93b': '\U0001e93b', '\U0001e93c': '\U0001e93c', '\U0001e93d': 
    '\U0001e93d', '\U0001e93e': '\U0001e93e', '\U0001e93f': '\U0001e93f',
    '\U0001e940': '\U0001e940', '\U0001e941': '\U0001e941',
    '\U0001e942': '\U0001e942',
}.get


class _NotImplementedWarning(RuntimeWarning):

    """Feature that is no longer implemented."""

    pass


class NotImplementedClass(object):

    """No implementation is available."""

    def __init__(self, *args, **kwargs):
        """Initializer.

        @raises NotImplementedError: always; the class name and its
            docstring are used as the error message.
        """
        raise NotImplementedError(
            '%s: %s' % (self.__class__.__name__, self.__doc__))


def has_module(module):
    """Check whether a module can be imported.

    @param module: dotted module name passed to importlib.import_module
    @return: True if the import succeeds, False on ImportError
    """
    try:
        import_module(module)
    except ImportError:
        return False
    else:
        return True


def empty_iterator():
    # http://stackoverflow.com/a/13243870/473890
    """An iterator which does nothing."""
    # The bare ``return`` before ``yield`` makes this a generator that
    # terminates immediately without yielding anything.
    return
    yield


def py2_encode_utf_8(func):
    """Decorator to optionally encode the string result of func on Python 2."""
    if PY2:
        return lambda s: func(s).encode('utf-8')
    else:
        return func


class classproperty(object):  # noqa: N801

    """
    Descriptor class to access a class method as a property.

    This class may be used as a decorator::

        class Foo(object):

            _bar = 'baz'  # a class property

            @classproperty
            def bar(cls):  # a class property method
                return cls._bar

    Foo.bar gives 'baz'.
    """

    def __init__(self, cls_method):
        """Hold the class method."""
        self.method = cls_method
        # propagate the wrapped method's docstring to the descriptor
        self.__doc__ = self.method.__doc__

    def __get__(self, instance, owner):
        """Get the attribute of the owner class by its method."""
        # ``owner`` is the class even when accessed through an instance,
        # so the wrapped method always receives the class as ``cls``.
        return self.method(owner)


class suppress_warnings(catch_warnings):  # noqa: N801

    """A decorator/context manager that temporarily suppresses warnings.

    Those suppressed warnings that do not match the parameters will be raised
    shown upon exit.
    """

    def __init__(self, message='', category=Warning, filename=''):
        """Initialize the object.

        The parameter semantics are similar to those of
        `warnings.filterwarnings`.

        @param message: A string containing a regular expression that the
            start of the warning message must match. (case-insensitive)
        @type message: str
        @param category: A class (a subclass of Warning) of which the warning
            category must be a subclass in order to match.
        @type category: type
        @param filename: A string containing a regular expression that the
            start of the path to the warning module must match.
            (case-sensitive)
        @type filename: str
        """
        self.message_match = re.compile(message, re.I).match
        self.category = category
        self.filename_match = re.compile(filename).match
        # record=True makes catch_warnings collect warnings into a log
        # instead of letting them propagate during the ``with`` body.
        super(suppress_warnings, self).__init__(record=True)

    def __enter__(self):
        """Catch all warnings and store them in `self.log`."""
        # NOTE: returns None (not self), so ``with suppress_warnings() as w``
        # binds None; the log is kept on the instance instead.
        self.log = super(suppress_warnings, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop logging warnings and show those that do not match to params."""
        super(suppress_warnings, self).__exit__()
        # Re-emit every recorded warning that fails any of the three
        # match criteria; matching warnings stay suppressed.
        for warning in self.log:
            if (
                not issubclass(warning.category, self.category)
                or not self.message_match(str(warning.message))
                or not self.filename_match(warning.filename)
            ):
                showwarning(
                    warning.message, warning.category, warning.filename,
                    warning.lineno, warning.file, warning.line)

    def __call__(self, func):
        """Decorate func to suppress warnings."""
        @wraps(func)
        def suppressed_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return suppressed_func


class UnicodeMixin(object):

    """Mixin class to add __str__ method in Python 2 or 3."""

    @py2_encode_utf_8
    def __str__(self):
        """Return the unicode representation as the str representation."""
        return self.__unicode__()


# From http://python3porting.com/preparing.html
class ComparableMixin(object):

    """Mixin class to allow comparing to other objects which are comparable.

    All comparisons delegate to the reflected operator on ``other``
    against ``self._cmpkey()``, which subclasses must provide.
    """

    def __lt__(self, other):
        """Compare if self is less than other."""
        return other > self._cmpkey()

    def __le__(self, other):
        """Compare if self is less equals other."""
        return other >= self._cmpkey()

    def __eq__(self, other):
        """Compare if self is equal to other."""
        return other == self._cmpkey()

    def __ge__(self, other):
        """Compare if self is greater equals
           other."""
        return other <= self._cmpkey()

    def __gt__(self, other):
        """Compare if self is greater than other."""
        return other < self._cmpkey()

    def __ne__(self, other):
        """Compare if self is not equal to other."""
        return other != self._cmpkey()


class DotReadableDict(UnicodeMixin):

    """Parent class of Revision() and FileInfo().

    Provide:
    - __getitem__(), __unicode__() and __repr__().
    """

    def __getitem__(self, key):
        """Give access to class values by key.

        Revision class may also give access to its values by keys
        e.g. revid parameter may be assigned by revision['revid']
        as well as revision.revid. This makes formatting strings with %
        operator easier.
        """
        return getattr(self, key)

    def __unicode__(self):
        """Return string representation."""
        # TODO: This is more efficient if the PY2 test is done during
        # class instantiation, and not inside the method.
        if not PY2:
            return repr(self.__dict__)
        else:
            _content = ', '.join(
                '{0}: {1}'.format(k, v) for k, v in self.__dict__.items())
            return '{{{0}}}'.format(_content)

    def __repr__(self):
        """Return a more complete string representation."""
        return repr(self.__dict__)


class FrozenDict(dict):

    """
    Frozen dict, preventing write after initialisation.

    Raises TypeError if write attempted.
    """

    def __init__(self, data=None, error=None):
        """
        Initializer.

        @param data: mapping to freeze
        @type data: mapping
        @param error: error message
        @type error: basestring
        """
        if data:
            args = [data]
        else:
            args = []
        super(FrozenDict, self).__init__(*args)
        self._error = error or 'FrozenDict: not writable'

    def update(self, *args, **kwargs):
        """Prevent updates."""
        raise TypeError(self._error)

    # item assignment raises the same TypeError as update()
    __setitem__ = update


class LazyRegex(object):

    """
    Regex object that obtains and compiles the regex on usage.

    Instances behave like the object created using L{re.compile}.
    """

    def __init__(self, pattern, flags=0):
        """
        Initializer.

        @param pattern: L{re} regex pattern
        @type pattern: str or callable
        @param flags: L{re.compile} flags
        @type flags: int
        """
        self.raw = pattern
        self.flags = flags
        super(LazyRegex, self).__init__()

    @property
    def raw(self):
        """The raw property."""
        # A callable pattern is resolved lazily on first access and the
        # result cached back onto the instance.
        if callable(self._raw):
            self._raw = self._raw()
        return self._raw

    @raw.setter
    def raw(self, value):
        # Changing the pattern invalidates any previously compiled regex.
        self._raw = value
        self._compiled = None

    @property
    def flags(self):
        """The flags property."""
        return self._flags

    @flags.setter
    def flags(self, value):
        # Changing flags also invalidates the compiled regex.
        self._flags = value
        self._compiled = None

    def __getattr__(self, attr):
        """Compile the regex and delegate all attribute to the regex."""
        # Only reached for attributes not found on the instance; compiles
        # the pattern on first use and forwards e.g. match/search/sub.
        if self._raw:
            if not self._compiled:
                self._compiled = re.compile(self.raw, self.flags)
            if hasattr(self._compiled, attr):
                return getattr(self._compiled, attr)
            raise AttributeError('%s: attr %s not recognised'
                                 % (self.__class__.__name__, attr))
        else:
            raise AttributeError('%s.raw not set' % self.__class__.__name__)


class DeprecatedRegex(LazyRegex):

    """Regex object that issues a deprecation notice."""

    def __init__(self, pattern, flags=0, name=None, instead=None, since=None):
        """
        Initializer.

        If name is None, the regex pattern will be used as part of
        the deprecation warning.

        @param name: name of the object that is deprecated
        @type name: str or None
        @param instead: if provided, will be used to specify the replacement
            of the deprecated name
        @type instead: str
        """
        super(DeprecatedRegex, self).__init__(pattern, flags)
        self._name = name or self.raw
        self._instead = instead
        self._since = since

    def __getattr__(self, attr):
        """Issue deprecation warning."""
        # Warn on every delegated attribute access, then defer to the
        # lazy-compiling parent implementation.
        issue_deprecation_warning(
            self._name, self._instead, 2, since=self._since)
        return super(DeprecatedRegex, self).__getattr__(attr)


def first_lower(string):
    """
    Return a string with the first character uncapitalized.

    Empty strings are supported. The original string is not changed.
    """
    return string[:1].lower() + string[1:]


def first_upper(string):
    """
    Return a string with the first character capitalized.
    Empty strings are supported. The original string is not changed.

    @note: MediaWiki doesn't capitalize some characters the same way as
        Python. This function tries to be close to MediaWiki's capitalize
        function in title.php. See T179115 and T200357.
    """
    first = string[:1]
    # Consult the MediaWiki-specific exception table first; fall back to
    # Python's str.upper() when the character is not listed.
    return (_first_upper_exception(first) or first.upper()) + string[1:]


def normalize_username(username):
    """Normalize the username.

    Collapse runs of underscores/spaces to single spaces, strip, and
    capitalize the first character the MediaWiki way.
    Returns None for an empty or missing username.
    """
    if not username:
        return None
    username = re.sub('[_ ]+', ' ', username).strip()
    return first_upper(username)


class MediaWikiVersion(Version):

    """
    Version object to allow comparing 'wmf' versions with normal ones.

    The version mainly consist of digits separated by periods. After that
    is a suffix which may only be 'wmf<number>', 'alpha', 'beta<number>'
    or '-rc.<number>' (the - and . are optional). They are considered from
    old to new in that order with a version number without suffix is
    considered the newest. This secondary difference is stored in an
    internal _dev_version attribute.

    Two versions are equal if their normal version and dev version are
    equal. A version is greater if the normal version or dev version is
    greater. For example:

        1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
        < 1.25-rc-1 < 1.25-rc.2 < 1.25

    Any other suffixes are considered invalid.
    """

    # group 1: dotted numeric core; groups 3/4/5: wmf/beta/rc numbers;
    # group 2: the whole suffix (may be arbitrary trailing text).
    MEDIAWIKI_VERSION = re.compile(
        r'^(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')

    @classmethod
    def from_generator(cls, generator):
        """Create instance using the generator string.

        @param generator: MediaWiki 'generator' value, e.g. 'MediaWiki 1.25'
        @raises ValueError: if the string does not start with 'MediaWiki '
        """
        if not generator.startswith('MediaWiki '):
            raise ValueError('Generator string ({0!r}) must start with '
                             '"MediaWiki "'.format(generator))
        return cls(generator[len('MediaWiki '):])

    def parse(self, vstring):
        """Parse version string."""
        version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
        if not version_match:
            raise ValueError('Invalid version number "{0}"'.format(vstring))
        components = [int(n) for n in version_match.group(1).split('.')]
        # The _dev_version numbering scheme might change. E.g. if a stage
        # between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
        # are reassigned (beta=3, rc=4, stable=5).
        if version_match.group(3):  # wmf version
            self._dev_version = (0, int(version_match.group(3)))
        elif version_match.group(4):
            self._dev_version = (2, int(version_match.group(4)))
        elif version_match.group(5):
            self._dev_version = (3, int(version_match.group(5)))
        elif version_match.group(2) in ('alpha', '-alpha'):
            self._dev_version = (1, )
        else:
            for handled in ('wmf', 'alpha', 'beta', 'rc'):
                # if any of those pops up here our parser has failed
                assert handled not in version_match.group(2), \
                    'Found "{0}" in "{1}"'.format(handled,
                                                  version_match.group(2))
            if version_match.group(2):
                debug('Additional unused version part '
                      '"{0}"'.format(version_match.group(2)),
                      _logger)
            # no recognised dev suffix: treat as a stable release (newest)
            self._dev_version = (4, )
        self.suffix = version_match.group(2) or ''
        self.version = tuple(components)

    def __str__(self):
        """Return version number with optional suffix."""
        return '.'.join(str(v) for v in self.version) + self.suffix

    def _cmp(self, other):
        # Strings are coerced so comparisons like version > '1.25' work.
        if isinstance(other, StringTypes):
            other = MediaWikiVersion(other)

        # Numeric core compares first; dev stage only breaks ties.
        if self.version > other.version:
            return 1
        if self.version < other.version:
            return -1
        if self._dev_version > other._dev_version:
            return 1
        if self._dev_version < other._dev_version:
            return -1
        return 0

    if PY2:
        __cmp__ = _cmp


class ThreadedGenerator(threading.Thread):

    """Look-ahead generator class.

    Runs a generator in a separate thread and queues the results; can
    be called like a regular generator.

    Subclasses should override self.generator, I{not} self.run

    Important: the generator thread will stop itself if the generator's
    internal queue is exhausted; but, if the calling program does not use
    all the generated values, it must call the generator's stop() method to
    stop the background thread. Example usage:

    >>> gen = ThreadedGenerator(target=range, args=(20,))
    >>> try:
    ...     data = list(gen)
    ... finally:
    ...
    ...         gen.stop()
    >>> data
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]

    """

    def __init__(self, group=None, target=None, name='GeneratorThread',
                 args=(), kwargs=None, qsize=65536):
        """Initializer. Takes same keyword arguments as threading.Thread.

        target must be a generator function (or other callable that returns
        an iterable object).

        @param qsize: The size of the lookahead queue. The larger the qsize,
            the more values will be computed in advance of use (which can eat
            up memory and processor time).
        @type qsize: int
        """
        if kwargs is None:
            kwargs = {}
        if target:
            self.generator = target
        # Either a target was given or a subclass defined self.generator.
        if not hasattr(self, 'generator'):
            raise RuntimeError('No generator for ThreadedGenerator to run.')
        self.args, self.kwargs = args, kwargs
        threading.Thread.__init__(self, group=group, name=name)
        self.queue = queue.Queue(qsize)
        # ``finished`` signals the worker to stop and consumers to quit.
        self.finished = threading.Event()

    def __iter__(self):
        """Iterate results from the queue."""
        # Start the worker lazily on first iteration, but never restart
        # one that has already been stopped.
        if not self.isAlive() and not self.finished.isSet():
            self.start()
        # if there is an item in the queue, yield it, otherwise wait
        while not self.finished.isSet():
            try:
                # timeout lets the loop re-check the finished flag
                yield self.queue.get(True, 0.25)
            except queue.Empty:
                pass
            except KeyboardInterrupt:
                self.stop()

    def stop(self):
        """Stop the background thread."""
        self.finished.set()

    def run(self):
        """Run the generator and store the results on the queue."""
        iterable = any(hasattr(self.generator, key)
                       for key in ('__iter__', '__getitem__'))
        if iterable and not self.args and not self.kwargs:
            # already an iterable object: iterate it directly
            self.__gen = self.generator
        else:
            self.__gen = self.generator(*self.args, **self.kwargs)
        for result in self.__gen:
            while True:
                if self.finished.isSet():
                    return
                try:
                    self.queue.put_nowait(result)
                except queue.Full:
                    # consumer is slower than producer: back off and retry
                    time.sleep(0.25)
                    continue
                break
        # wait for queue to be emptied, then kill the thread
        while not self.finished.isSet() and not self.queue.empty():
            time.sleep(0.25)
        self.stop()


def itergroup(iterable, size):
    """Make an iterator that returns lists of (up to) size items from iterable.

    Example:

    >>> i = itergroup(range(25), 10)
    >>> print(next(i))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> print(next(i))
    [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    >>> print(next(i))
    [20, 21, 22, 23, 24]
    >>> print(next(i))
    Traceback (most recent call last):
     ...
    StopIteration
    """
    group = []
    for item in iterable:
        group.append(item)
        if len(group) == size:
            yield group
            group = []
    # trailing partial group (shorter than ``size``) is still yielded
    if group:
        yield group


def islice_with_ellipsis(iterable, *args, **kwargs):
    """
    Generator which yields the first n elements of the iterable.

    If more elements are available and marker is True, it returns an extra
    string marker as continuation mark.

    Function takes the
    and the additional keyword marker.

    @param iterable: the iterable to work on
    @type iterable: iterable
    @param args: same args as:
        - C{itertools.islice(iterable, stop)}
        - C{itertools.islice(iterable, start, stop[, step])}
    @keyword marker: element to yield if iterable still contains elements
        after showing the required number.
        Default value: '…'
        No other kwargs are considered.
    @type marker: str
    """
    s = slice(*args)
    marker = kwargs.pop('marker', '…')
    # After popping 'marker', any remaining keyword argument is an error;
    # popitem on an empty dict raises KeyError, which means "all good".
    try:
        k, v = kwargs.popitem()
        raise TypeError(
            "islice_with_ellipsis() take only 'marker' as keyword arg, not %s"
            % k)
    except KeyError:
        pass

    _iterable = iter(iterable)
    for el in itertools.islice(_iterable, *args):
        yield el
    # Yield the marker only if the slice had an upper bound and the
    # underlying iterator still has at least one more element.
    if marker and s.stop is not None:
        try:
            next(_iterable)
        except StopIteration:
            pass
        else:
            yield marker


class ThreadList(list):

    """A simple threadpool class to limit the number of simultaneous threads.

    Any threading.Thread object can be added to the pool using the append()
    method. If the maximum number of simultaneous threads has not been
    reached, the Thread object will be started immediately; if not, the
    append() call will block until the thread is able to start.

    >>> pool = ThreadList(limit=10)
    >>> def work():
    ...     time.sleep(1)
    ...
    >>> for x in range(20):
    ...     pool.append(threading.Thread(target=work))
    ...
""" _logger = 'threadlist' def __init__(self, limit=128, *args): """Initializer.""" self.limit = limit super(ThreadList, self).__init__(*args) for item in self: if not isinstance(threading.Thread, item): raise TypeError("Cannot add '%s' to ThreadList" % type(item)) def active_count(self): """Return the number of alive threads and delete all non-alive ones.""" cnt = 0 for item in self[:]: if item.isAlive(): cnt += 1 else: self.remove(item) return cnt def append(self, thd): """Add a thread to the pool and start it.""" if not isinstance(thd, threading.Thread): raise TypeError("Cannot append '%s' to ThreadList" % type(thd)) while self.active_count() >= self.limit: time.sleep(2) super(ThreadList, self).append(thd) thd.start() debug("thread %d ('%s') started" % (len(self), type(thd)), self._logger) def stop_all(self): """Stop all threads the pool.""" if self: debug('EARLY QUIT: Threads: %d' % len(self), self._logger) for thd in self: thd.stop() debug('EARLY QUIT: Queue size left in %s: %s' % (thd, thd.queue.qsize()), self._logger) def intersect_generators(genlist): """ Intersect generators listed in genlist. Yield items only if they are yielded by all generators in genlist. Threads (via ThreadedGenerator) are used in order to run generators in parallel, so that items can be yielded before generators are exhausted. Threads are stopped when they are either exhausted or Ctrl-C is pressed. Quitting before all generators are finished is attempted if there is no more chance of finding an item in all queues. @param genlist: list of page generators @type genlist: list """ # If any generator is empty, no pages are going to be returned for source in genlist: if not source: debug('At least one generator ({0!r}) is empty and execution was ' 'skipped immediately.'.format(source), 'intersect') return # Item is cached to check that it is found n_gen # times before being yielded. cache = collections.defaultdict(set) n_gen = len(genlist) # Class to keep track of alive threads. 
    # Start new threads and remove completed threads.
    thrlist = ThreadList()

    for source in genlist:
        threaded_gen = ThreadedGenerator(name=repr(source), target=source)
        threaded_gen.daemon = True
        thrlist.append(threaded_gen)

    while True:
        # Get items from queues in a round-robin way.
        for t in thrlist:
            try:
                # TODO: evaluate if True and timeout is necessary.
                item = t.queue.get(True, 0.1)

                # Cache entry is a set of thread.
                # Duplicates from same thread are not counted twice.
                cache[item].add(t)
                if len(cache[item]) == n_gen:
                    yield item
                    # Remove item from cache.
                    # No chance of seeing it again (see later: early stop).
                    cache.pop(item)

                active = thrlist.active_count()
                # max_cache: the highest count any pending item has reached.
                max_cache = n_gen
                if cache.values():
                    max_cache = max(len(v) for v in cache.values())
                # No. of active threads is not enough to reach n_gen.
                # We can quit even if some thread is still active.
                # There could be an item in all generators which has not yet
                # appeared from any generator. Only when we have lost one
                # generator, then we can bail out early based on seen items.
                if active < n_gen and n_gen - max_cache > active:
                    thrlist.stop_all()
                    return
            except queue.Empty:
                pass
            except KeyboardInterrupt:
                thrlist.stop_all()
            # Intentionally no return after Ctrl-C: the finally clause
            # below detects that all threads have stopped and exits.
            finally:
                # All threads are done.
                if thrlist.active_count() == 0:
                    return


def roundrobin_generators(*iterables):
    """Yield items from each iterable in a round-robin way.

    Note: C{None} items are dropped, as zip_longest's fillvalue (None)
    cannot be distinguished from a real None element.

    Sample:

    >>> tuple(roundrobin_generators('ABC', range(5)))
    ('A', 0, 'B', 1, 'C', 2, 3, 4)

    @param iterables: any iterable to combine in roundrobin way
    @type iterables: iterable
    @return: the combined generator of iterables
    @rtype: generator
    """
    return (item
            for item in itertools.chain.from_iterable(zip_longest(*iterables))
            if item is not None)


def filter_unique(iterable, container=None, key=None, add=None):
    """
    Yield unique items from an iterable, omitting duplicates.

    By default, to provide uniqueness, it puts the generated items into a
    set created as a local variable. It only yields items which are not
    already present in the local set.

    For large collections, this is not memory efficient, as a strong
    reference to every item is kept in a local set which can not be cleared.

    Also, the local set can't be re-used when chaining unique operations on
    multiple generators.

    To avoid these issues, it is advisable for the caller to provide their
    own container and set the key parameter to be the function L{hash}, or
    use a L{weakref} as the key.

    The container can be any object that supports __contains__.
    If the container is a set or dict, the method add or __setitem__ will be
    used automatically. Any other method may be provided explicitly using the
    add parameter.

    Beware that key=id is only useful for cases where id() is not unique.

    Note: This is not thread safe.

    @param iterable: the source iterable
    @type iterable: collections.abc.Iterable
    @param container: storage of seen items
    @type container: type
    @param key: function to convert the item to a key
    @type key: callable
    @param add: function to add an item to the container
    @type add: callable
    """
    if container is None:
        container = set()

    if not add:
        if hasattr(container, 'add'):
            def container_add(x):
                container.add(key(x) if key else x)

            add = container_add
        else:
            def container_setitem(x):
                container.__setitem__(key(x) if key else x, True)

            add = container_setitem

    for item in iterable:
        try:
            if (key(item) if key else item) not in container:
                add(item)
                yield item
        # A key/add callback may signal exhaustion by raising StopIteration,
        # which ends the generator cleanly.
        except StopIteration:
            return


class CombinedError(KeyError, IndexError):

    """An error that gets caught by both KeyError and IndexError."""


class EmptyDefault(str, Mapping):

    """
    A default for a not existing siteinfo property.

    It should be chosen if there is no better default known. It acts like an
    empty collections, so it can be iterated through it safely if treated as
    a list, tuple, set or dictionary. It is also basically an empty string.

    Accessing a value via __getitem__ will result in an combined KeyError and
    IndexError.
""" def __init__(self): """Initialise the default as an empty string.""" str.__init__(self) def _empty_iter(self): """An iterator which does nothing and drops the argument.""" return empty_iterator() def __getitem__(self, key): """Raise always a L{CombinedError}.""" raise CombinedError(key) iteritems = itervalues = iterkeys = __iter__ = _empty_iter EMPTY_DEFAULT = EmptyDefault() class SelfCallMixin(object): """ Return self when called. When '_own_desc' is defined it'll also issue a deprecation warning using issue_deprecation_warning('Calling ' + _own_desc, 'it directly'). """ def __call__(self): """Do nothing and just return itself.""" if hasattr(self, '_own_desc'): issue_deprecation_warning('Calling {0}'.format(self._own_desc), 'it directly', 2, since='20150515') return self class SelfCallDict(SelfCallMixin, dict): """Dict with SelfCallMixin.""" class SelfCallString(SelfCallMixin, str): """Unicode string with SelfCallMixin.""" class IteratorNextMixin(Iterator): """Backwards compatibility for Iterators.""" if PY2: def next(self): """Python 2 next.""" return self.__next__() class DequeGenerator(IteratorNextMixin, collections.deque): """A generator that allows items to be added during generating.""" def __next__(self): """Python 3 iterator method.""" if len(self): return self.popleft() else: raise StopIteration def open_archive(filename, mode='rb', use_extension=True): """ Open a file and uncompress it if needed. This function supports bzip2, gzip, 7zip, lzma, and xz as compression containers. It uses the packages available in the standard library for bzip2, gzip, lzma, and xz so they are always available. 7zip is only available when a 7za program is available and only supports reading from it. The compression is either selected via the magic number or file ending. @param filename: The filename. @type filename: str @param use_extension: Use the file extension instead of the magic number to determine the type of compression (default True). 
Must be True when writing or appending. @type use_extension: bool @param mode: The mode in which the file should be opened. It may either be 'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary mode. It defaults to 'rb'. @type mode: str @raises ValueError: When 7za is not available or the opening mode is unknown or it tries to write a 7z archive. @raises FileNotFoundError: When the filename doesn't exist and it tries to read from it or it tries to determine the compression algorithm (or IOError on Python 2). @raises OSError: When it's not a 7z archive but the file extension is 7z. It is also raised by bz2 when its content is invalid. gzip does not immediately raise that error but only on reading it. @raises lzma.LZMAError: When error occurs during compression or decompression or when initializing the state with lzma or xz. @raises ImportError: When file is compressed with bz2 but neither bz2 nor bz2file is importable, or when file is compressed with lzma or xz but lzma is not importable. @return: A file-like object returning the uncompressed data in binary mode. @rtype: file-like object """ if mode in ('r', 'a', 'w'): mode += 'b' elif mode not in ('rb', 'ab', 'wb'): raise ValueError('Invalid mode: "{0}"'.format(mode)) if use_extension: # if '.' 
not in filename, it'll be 1 character long but otherwise # contain the period extension = filename[filename.rfind('.'):][1:] else: if mode != 'rb': raise ValueError('Magic number detection only when reading') with open(filename, 'rb') as f: magic_number = f.read(8) if magic_number.startswith(b'BZh'): extension = 'bz2' elif magic_number.startswith(b'\x1F\x8B\x08'): extension = 'gz' elif magic_number.startswith(b"7z\xBC\xAF'\x1C"): extension = '7z' # Unfortunately, legacy LZMA container format has no magic number elif magic_number.startswith(b'\xFD7zXZ\x00'): extension = 'xz' else: extension = '' if extension == 'bz2': if isinstance(bz2, ImportError): raise bz2 return bz2.BZ2File(filename, mode) if extension == 'gz': return gzip.open(filename, mode) if extension == '7z': if mode != 'rb': raise NotImplementedError('It is not possible to write a 7z file.') try: process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=65535) except OSError: raise ValueError('7za is not installed or cannot ' 'uncompress "{0}"'.format(filename)) else: stderr = process.stderr.read() process.stderr.close() if stderr != b'': process.stdout.close() raise OSError( 'Unexpected STDERR output from 7za {0}'.format(stderr)) else: return process.stdout if extension == 'lzma': if isinstance(lzma, ImportError): raise lzma return lzma.open(filename, mode, format=lzma.FORMAT_ALONE) if extension == 'xz': if isinstance(lzma, ImportError): raise lzma return lzma.open(filename, mode, format=lzma.FORMAT_XZ) # assume it's an uncompressed file return open(filename, 'rb') def merge_unique_dicts(*args, **kwargs): """ Return a merged dict and make sure that the original dicts keys are unique. The positional arguments are the dictionaries to be merged. It is also possible to define an additional dict using the keyword arguments. 
""" args = list(args) + [dict(kwargs)] conflicts = set() result = {} for arg in args: conflicts |= set(arg.keys()) & set(result.keys()) result.update(arg) if conflicts: raise ValueError('Multiple dicts contain the same keys: {0}' .format(', '.join(sorted(UnicodeType(key) for key in conflicts)))) return result # Decorators # # Decorator functions without parameters are _invoked_ differently from # decorator functions with function syntax. For example, @deprecated causes # a different invocation to @deprecated(). # The former is invoked with the decorated function as args[0]. # The latter is invoked with the decorator arguments as *args & **kwargs, # and it must return a callable which will be invoked with the decorated # function as args[0]. # The follow deprecators may support both syntax, e.g. @deprecated and # @deprecated() both work. In order to achieve that, the code inspects # args[0] to see if it callable. Therefore, a decorator must not accept # only one arg, and that arg be a callable, as it will be detected as # a deprecator without any arguments. def signature(obj): """ Safely return function Signature object (PEP 362). inspect.signature was introduced in 3.3, however backports are available. Any exception calling inspect.signature is ignored and None is returned. @param obj: Function to inspect @type obj: callable @rtype: inpect.Signature or None """ try: return inspect.signature(obj) except (AttributeError, ValueError): return None def add_decorated_full_name(obj, stacklevel=1): """Extract full object name, including class, and store in __full_name__. This must be done on all decorators that are chained together, otherwise the second decorator will have the wrong full name. 
@param obj: A object being decorated @type obj: object @param stacklevel: level to use @type stacklevel: int """ if hasattr(obj, '__full_name__'): return # The current frame is add_decorated_full_name # The next frame is the decorator # The next frame is the object being decorated frame = sys._getframe(stacklevel + 1) class_name = frame.f_code.co_name if class_name and class_name != '<module>': obj.__full_name__ = '{}.{}.{}'.format( obj.__module__, class_name, obj.__name__) else: obj.__full_name__ = '{}.{}'.format( obj.__module__, obj.__name__) def manage_wrapping(wrapper, obj): """Add attributes to wrapper and wrapped functions.""" wrapper.__doc__ = obj.__doc__ wrapper.__name__ = obj.__name__ wrapper.__module__ = obj.__module__ wrapper.__signature__ = signature(obj) if not hasattr(obj, '__full_name__'): add_decorated_full_name(obj, 2) wrapper.__full_name__ = obj.__full_name__ # Use the previous wrappers depth, if it exists wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1 # Obtain the wrapped object from the previous wrapper wrapped = getattr(obj, '__wrapped__', obj) wrapper.__wrapped__ = wrapped # Increment the number of wrappers if hasattr(wrapped, '__wrappers__'): wrapped.__wrappers__ += 1 else: wrapped.__wrappers__ = 1 def get_wrapper_depth(wrapper): """Return depth of wrapper function.""" return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__) def add_full_name(obj): """ A decorator to add __full_name__ to the function being decorated. This should be done for all decorators used in pywikibot, as any decorator that does not add __full_name__ will prevent other decorators in the same chain from being able to obtain it. This can be used to monkey-patch decorators in other modules. e.g. <xyz>.foo = add_full_name(<xyz>.foo) @param obj: The function to decorate @type obj: callable @return: decorating function @rtype: function """ def outer_wrapper(*outer_args, **outer_kwargs): """Outer wrapper. 
The outer wrapper may be the replacement function if the decorated decorator was called without arguments, or the replacement decorator if the decorated decorator was called without arguments. @param outer_args: args @param outer_kwargs: kwargs """ def inner_wrapper(*args, **kwargs): """Replacement function. If the decorator supported arguments, they are in outer_args, and this wrapper is used to process the args which belong to the function that the decorated decorator was decorating. @param args: args passed to the decorated function. @param kwargs: kwargs passed to the decorated function. """ add_decorated_full_name(args[0]) return obj(*outer_args, **outer_kwargs)(*args, **kwargs) inner_wrapper.__doc__ = obj.__doc__ inner_wrapper.__name__ = obj.__name__ inner_wrapper.__module__ = obj.__module__ inner_wrapper.__signature__ = signature(obj) # The decorator being decorated may have args, so both # syntax need to be supported. if (len(outer_args) == 1 and len(outer_kwargs) == 0 and callable(outer_args[0])): add_decorated_full_name(outer_args[0]) return obj(outer_args[0]) else: return inner_wrapper if not __debug__: return obj return outer_wrapper def _build_msg_string(instead, since): """Build a deprecation warning message format string.""" if not since: since = '' elif '.' in since: since = ' since release ' + since else: year_str = month_str = day_str = '' days = (datetime.utcnow() - datetime.strptime(since, '%Y%m%d')).days years = days // 365 days = days % 365 months = days // 30 days = days % 30 if years == 1: years = 0 months += 12 if years: year_str = '{0} years'.format(years) else: day_str = '{0} day{1}'.format(days, 's' if days != 1 else '') if months: month_str = '{0} month{1}'.format( months, 's' if months != 1 else '') if year_str and month_str: year_str += ' and ' if month_str and day_str: month_str += ' and ' since = ' for {0}{1}{2}'.format(year_str, month_str, day_str) if instead: msg = '{{0}} is deprecated{since}; use {{1}} instead.' 
    else:
        msg = '{{0}} is deprecated{since}.'
    return msg.format(since=since)


def issue_deprecation_warning(name, instead, depth, warning_class=None,
                              since=None):
    """Issue a deprecation warning.

    @param name: the name of the deprecated object
    @type name: str
    @param instead: suggested replacement for the deprecated object
    @type instead: str
    @param depth: depth + 1 will be used as stacklevel for the warnings
    @type depth: int
    @param warning_class: a warning class (category) to be used, defaults to
        DeprecationWarning
    @type warning_class: type
    @param since: a timestamp string of the date when the method was
        deprecated (form 'YYYYMMDD') or a version string.
    @type since: str
    """
    msg = _build_msg_string(instead, since)
    if warning_class is None:
        # Without a replacement there is nothing actionable; use the
        # weaker _NotImplementedWarning category instead.
        warning_class = (DeprecationWarning
                         if instead else _NotImplementedWarning)
    warn(msg.format(name, instead), warning_class, depth + 1)


@add_full_name
def deprecated(*args, **kwargs):
    """Decorator to output a deprecation warning.

    @kwarg instead: if provided, will be used to specify the replacement
    @type instead: str
    @kwarg since: a timestamp string of the date when the method was
        deprecated (form 'YYYYMMDD') or a version string.
    @type since: str
    """
    def decorator(obj):
        """Outer wrapper.

        The outer wrapper is used to create the decorating wrapper.

        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*args, **kwargs):
            """Replacement function.

            @param args: args passed to the decorated function.
            @param kwargs: kwargs passed to the decorated function.
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            depth = get_wrapper_depth(wrapper) + 1
            issue_deprecation_warning(name, instead, depth, since=since)
            return obj(*args, **kwargs)

        def add_docstring(wrapper):
            """Add a Deprecated notice to the docstring."""
            deprecation_notice = 'Deprecated'
            if instead:
                deprecation_notice += '; use ' + instead + ' instead'
            deprecation_notice += '.\n\n'
            if wrapper.__doc__:  # Append old docstring after the notice
                wrapper.__doc__ = deprecation_notice + wrapper.__doc__
            else:
                wrapper.__doc__ = deprecation_notice

        if not __debug__:
            return obj

        manage_wrapping(wrapper, obj)

        # Regular expression to find existing deprecation notices
        deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
                                       re.IGNORECASE)

        # Add the deprecation notice to the docstring if not present
        if not wrapper.__doc__:
            add_docstring(wrapper)
        else:
            if not deprecated_notice.search(wrapper.__doc__):
                add_docstring(wrapper)
            else:
                # Get docstring up to @params so deprecation notices for
                # parameters don't disrupt it
                trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
                trimmed_doc = trim_params.match(wrapper.__doc__).group(0)

                if not deprecated_notice.search(trimmed_doc):  # No notice
                    add_docstring(wrapper)

        return wrapper

    since = kwargs.pop('since', None)
    # @deprecated without parentheses: single positional callable argument.
    without_parameters = (len(args) == 1 and len(kwargs) == 0
                          and callable(args[0]))

    if 'instead' in kwargs:
        instead = kwargs['instead']
    elif not without_parameters and len(args) == 1:
        instead = args[0]
    else:
        instead = False

    # When called as @deprecated, return a replacement function
    if without_parameters:
        if not __debug__:
            return args[0]

        return decorator(args[0])
    # Otherwise return a decorator, which returns a replacement function
    else:
        return decorator


def deprecate_arg(old_arg, new_arg):
    """Decorator to declare old_arg deprecated and replace it with new_arg."""
    return deprecated_args(**{old_arg: new_arg})


def deprecated_args(**arg_pairs):
    """
    Decorator to declare multiple args
    deprecated.

    @param arg_pairs: Each entry points to the new argument name. With True
        or None it drops the value and prints a warning. If False it just
        drops the value.
    """
    def decorator(obj):
        """Outer wrapper.

        The outer wrapper is used to create the decorating wrapper.

        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*__args, **__kw):
            """Replacement function.

            @param __args: args passed to the decorated function
            @param __kw: kwargs passed to the decorated function
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            depth = get_wrapper_depth(wrapper) + 1
            for old_arg, new_arg in arg_pairs.items():
                output_args = {
                    'name': name,
                    'old_arg': old_arg,
                    'new_arg': new_arg,
                }
                if old_arg in __kw:
                    if new_arg not in [True, False, None]:
                        if new_arg in __kw:
                            warn('%(new_arg)s argument of %(name)s '
                                 'replaces %(old_arg)s; cannot use both.'
                                 % output_args,
                                 RuntimeWarning, depth)
                        else:
                            # If the value is positionally given this will
                            # cause a TypeError, which is intentional
                            warn('%(old_arg)s argument of %(name)s '
                                 'is deprecated; use %(new_arg)s instead.'
                                 % output_args,
                                 DeprecationWarning, depth)
                            __kw[new_arg] = __kw[old_arg]
                    else:
                        # True/None: noisy drop; False: quieter drop.
                        if new_arg is False:
                            cls = PendingDeprecationWarning
                        else:
                            cls = DeprecationWarning
                        warn('%(old_arg)s argument of %(name)s is '
                             'deprecated.'
                             % output_args,
                             cls, depth)
                    del __kw[old_arg]
            return obj(*__args, **__kw)

        if not __debug__:
            return obj

        manage_wrapping(wrapper, obj)

        if wrapper.__signature__:
            # Build a new signature with deprecated args added.
            # __signature__ is only available in Python 3 which has
            # OrderedDict
            params = collections.OrderedDict()
            for param in wrapper.__signature__.parameters.values():
                params[param.name] = param.replace()
            for old_arg, new_arg in arg_pairs.items():
                # NOTE(review): relies on the private
                # inspect._POSITIONAL_OR_KEYWORD constant and on assigning
                # Signature._parameters directly — fragile across Python
                # releases.
                params[old_arg] = inspect.Parameter(
                    old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
                    default='[deprecated name of ' + new_arg + ']'
                    if new_arg not in [True, False, None]
                    else NotImplemented)
            wrapper.__signature__ = inspect.Signature()
            wrapper.__signature__._parameters = params

        return wrapper

    return decorator


def remove_last_args(arg_names):
    """
    Decorator to declare all args additionally provided deprecated.

    All positional arguments appearing after the normal arguments are marked
    deprecated. It marks also all keyword arguments present in arg_names as
    deprecated. Any arguments (positional or keyword) which are not present
    in arg_names are forwarded. For example a call with 3 parameters and the
    original function requests one and arg_names contain one name will
    result in an error, because the function got called with 2 parameters.

    The decorated function may not use C{*args} or C{**kwargs}.

    @param arg_names: The names of all arguments.
    @type arg_names: iterable; for the most explanatory message it should
        retain the given order (so not a set for example).
    """
    def decorator(obj):
        """Outer wrapper.

        The outer wrapper is used to create the decorating wrapper.

        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*__args, **__kw):
            """Replacement function.

            @param __args: args passed to the decorated function
            @param __kw: kwargs passed to the decorated function
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            depth = get_wrapper_depth(wrapper) + 1
            args, varargs, kwargs, _ = getargspec(wrapper.__wrapped__)
            if varargs is not None and kwargs is not None:
                raise ValueError('{0} may not have * or ** args.'.format(
                    name))
            deprecated = set(__kw) & set(arg_names)
            if len(__args) > len(args):
                deprecated.update(arg_names[:len(__args) - len(args)])
            # remove at most |arg_names| entries from the back
            new_args = tuple(__args[:max(len(args),
                                         len(__args) - len(arg_names))])
            new_kwargs = {arg: val for arg, val in __kw.items()
                          if arg not in arg_names}

            if deprecated:
                # sort them according to arg_names
                deprecated = [arg for arg in arg_names if arg in deprecated]
                warn("The trailing arguments ('{0}') of {1} are deprecated. "
                     "The value(s) provided for '{2}' have been dropped.".
                     format("', '".join(arg_names),
                            name,
                            "', '".join(deprecated)),
                     DeprecationWarning, depth)
            return obj(*new_args, **new_kwargs)

        manage_wrapping(wrapper, obj)

        return wrapper

    return decorator


def redirect_func(target, source_module=None, target_module=None,
                  old_name=None, class_name=None, since=None):
    """
    Return a function which can be used to redirect to 'target'.

    It also acts like marking that function deprecated and copies all
    parameters.

    @param target: The targeted function which is to be executed.
    @type target: callable
    @param source_module: The module of the old function. If '.' defaults
        to target_module. If 'None' (default) it tries to guess it from the
        executing function.
    @type source_module: basestring
    @param target_module: The module of the target function. If 'None'
        (default) it tries to get it from the target. Might not work with
        nested classes.
    @type target_module: basestring
    @param old_name: The old function name. If None it uses the name of the
        new function.
@type old_name: basestring @param class_name: The name of the class. It's added to the target and source module (separated by a '.'). @type class_name: basestring @param since: a timestamp string of the date when the method was deprecated (form 'YYYYMMDD') or a version string. @type since: str @return: A new function which adds a warning prior to each execution. @rtype: callable """ def call(*a, **kw): issue_deprecation_warning(old_name, new_name, 2, since=since) return target(*a, **kw) if target_module is None: target_module = target.__module__ if target_module and target_module[-1] != '.': target_module += '.' if source_module == '.': source_module = target_module elif source_module and source_module[-1] != '.': source_module += '.' else: source_module = sys._getframe(1).f_globals['__name__'] + '.' if class_name: target_module += class_name + '.' source_module += class_name + '.' old_name = source_module + (old_name or target.__name__) new_name = target_module + target.__name__ if not __debug__: return target return call class ModuleDeprecationWrapper(types.ModuleType): """A wrapper for a module to deprecate classes or variables of it.""" def __init__(self, module): """ Initialise the wrapper. It will automatically overwrite the module with this instance in C{sys.modules}. @param module: The module name or instance @type module: str or module """ if isinstance(module, StringTypes): module = sys.modules[module] super(ModuleDeprecationWrapper, self).__setattr__('_deprecated', {}) super(ModuleDeprecationWrapper, self).__setattr__('_module', module) self.__dict__.update(module.__dict__) if __debug__: sys.modules[module.__name__] = self def _add_deprecated_attr(self, name, replacement=None, replacement_name=None, warning_message=None, since=None): """ Add the name to the local deprecated names dict. @param name: The name of the deprecated class or variable. It may not be already deprecated. 
@type name: str @param replacement: The replacement value which should be returned instead. If the name is already an attribute of that module this must be None. If None it'll return the attribute of the module. @type replacement: any @param replacement_name: The name of the new replaced value. Required if C{replacement} is not None and it has no __name__ attribute. If it contains a '.', it will be interpreted as a Python dotted object name, and evaluated when the deprecated object is needed. @type replacement_name: str @param warning_message: The warning to display, with positional variables: {0} = module, {1} = attribute name, {2} = replacement. @type warning_message: basestring @param since: a timestamp string of the date when the method was deprecated (form 'YYYYMMDD') or a version string. @type since: str """ if '.' in name: raise ValueError('Deprecated name "{0}" may not contain ' '".".'.format(name)) if name in self._deprecated: raise ValueError('Name "{0}" is already deprecated.'.format(name)) if replacement is not None and hasattr(self._module, name): raise ValueError('Module has already an attribute named ' '"{0}".'.format(name)) if replacement_name is None: if hasattr(replacement, '__name__'): replacement_name = replacement.__module__ if hasattr(replacement, '__self__'): replacement_name += '.' replacement_name += replacement.__self__.__class__.__name__ replacement_name += '.' + replacement.__name__ else: raise TypeError('Replacement must have a __name__ attribute ' 'or a replacement name must be set ' 'specifically.') if not warning_message: warning_message = _build_msg_string( replacement_name, since).format('{0}.{1}', '{2}') if hasattr(self, name): # __getattr__ will only be invoked if self.<name> does not exist. 
            delattr(self, name)
        self._deprecated[name] = replacement_name, replacement, warning_message

    def __setattr__(self, attr, value):
        """Set the value of the wrapped module."""
        # Mirror assignments into the wrapped module so both views agree.
        self.__dict__[attr] = value
        setattr(self._module, attr, value)

    def __getattr__(self, attr):
        """Return the attribute with a deprecation warning if required."""
        if attr in self._deprecated:
            warning_message = self._deprecated[attr][2]
            warn(warning_message.format(self._module.__name__, attr,
                                        self._deprecated[attr][0]),
                 DeprecationWarning, 2)
            if self._deprecated[attr][1]:
                return self._deprecated[attr][1]
            elif '.' in self._deprecated[attr][0]:
                # Lazily resolve a dotted replacement name: import its top
                # package and eval the dotted path, then cache the result.
                # NOTE(review): eval() here only sees names registered via
                # _add_deprecated_attr, not external input.
                try:
                    package_name = self._deprecated[attr][0].split('.', 1)[0]
                    module = import_module(package_name)
                    context = {package_name: module}
                    replacement = eval(self._deprecated[attr][0], context)
                    self._deprecated[attr] = (
                        self._deprecated[attr][0],
                        replacement,
                        self._deprecated[attr][2]
                    )
                    return replacement
                except Exception:
                    # Fall through to the wrapped module's own attribute.
                    pass
        return getattr(self._module, attr)


@deprecated('open_archive()', since='20150915')
def open_compressed(filename, use_extension=False):
    """DEPRECATED: Open a file and uncompress it if needed."""
    return open_archive(filename, use_extension=use_extension)


def file_mode_checker(filename, mode=0o600, quiet=False, create=False):
    """Check file mode and update it, if needed.

    @param filename: filename path
    @type filename: basestring
    @param mode: requested file mode
    @type mode: int
    @param quiet: warn about file mode change if False.
    @type quiet: bool
    @param create: create the file if it does not exist already
    @type create: bool
    @raise IOError: The file does not exist and `create` is False.
    """
    try:
        st_mode = os.stat(filename).st_mode
    except OSError:  # file does not exist
        if not create:
            raise
        # Create it exclusively with the requested mode and close at once.
        os.close(os.open(filename, os.O_CREAT | os.O_EXCL, mode))
        return

    warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'
if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode): os.chmod(filename, mode) # re-read and check changes if os.stat(filename).st_mode != st_mode and not quiet: warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode)) def compute_file_hash(filename, sha='sha1', bytes_to_read=None): """Compute file hash. Result is expressed as hexdigest(). @param filename: filename path @type filename: basestring @param func: hashing function among the following in hashlib: md5(), sha1(), sha224(), sha256(), sha384(), and sha512() function name shall be passed as string, e.g. 'sha1'. @type filename: basestring @param bytes_to_read: only the first bytes_to_read will be considered; if file size is smaller, the whole file will be considered. @type bytes_to_read: None or int """ size = os.path.getsize(filename) if bytes_to_read is None: bytes_to_read = size else: bytes_to_read = min(bytes_to_read, size) step = 1 << 20 shas = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'] assert sha in shas sha = getattr(hashlib, sha)() # sha instance with open(filename, 'rb') as f: while bytes_to_read > 0: read_bytes = f.read(min(bytes_to_read, step)) assert read_bytes # make sure we actually read bytes bytes_to_read -= len(read_bytes) sha.update(read_bytes) return sha.hexdigest() # deprecated parts ############################################################ class ContextManagerWrapper(object): """ DEPRECATED. Wraps an object in a context manager. It is redirecting all access to the wrapped object and executes 'close' when used as a context manager in with-statements. In such statements the value set via 'as' is directly the wrapped object. For example: >>> class Wrapper(object): ... def close(self): pass >>> an_object = Wrapper() >>> wrapped = ContextManagerWrapper(an_object) >>> with wrapped as another_object: ... assert another_object is an_object It does not subclass the object though, so isinstance checks will fail outside a with-statement. 
""" def __init__(self, wrapped): """Create a new wrapper.""" super(ContextManagerWrapper, self).__init__() super(ContextManagerWrapper, self).__setattr__('_wrapped', wrapped) def __enter__(self): """Enter a context manager and use the wrapped object directly.""" return self._wrapped def __exit__(self, exc_type, exc_value, traceback): """Call close on the wrapped object when exiting a context manager.""" self._wrapped.close() def __getattr__(self, name): """Get the attribute from the wrapped object.""" return getattr(self._wrapped, name) def __setattr__(self, name, value): """Set the attribute in the wrapped object.""" setattr(self._wrapped, name, value) @deprecated('bot_choice.Option and its subclasses', since='20181217') def concat_options(message, line_length, options): """DEPRECATED. Concatenate options.""" indent = len(message) + 2 line_length -= indent option_msg = '' option_line = '' for option in options: if option_line: option_line += ', ' # +1 for ',' if len(option_line) + len(option) + 1 > line_length: if option_msg: option_msg += '\n' + ' ' * indent option_msg += option_line[:-1] # remove space option_line = '' option_line += option if option_line: if option_msg: option_msg += '\n' + ' ' * indent option_msg += option_line return '{0} ({1}):'.format(message, option_msg) wrapper = ModuleDeprecationWrapper(__name__) wrapper._add_deprecated_attr('Counter', collections.Counter, since='20160111') wrapper._add_deprecated_attr('OrderedDict', collections.OrderedDict, since='20160111') wrapper._add_deprecated_attr('count', itertools.count, since='20160111') wrapper._add_deprecated_attr('ContextManagerWrapper', replacement_name='', since='20180402')
android_hooks.py
# -*- coding: utf-8 -*-
# An android event hook via getevent.
# Only ABS_MT_POSITION_X(Y) events are handled.
#
# Basic input: TouchDown(D), TouchUp(U), TouchMove(M)
# Basic timeouts: TouchPressTimeout(P), TouchFollowTimeout(F), TouchMoveStopTimeout(S)
# guestures are defined as follows:
# Tap/Touch/Click: DM?UF
# TapFollow: (DM?U)+DM?UF
# LongPress: DP, may be followed by Drag or Swipe
# Drag: D?M+S, may be followed by Drag or Swipe
# Swipe/Fling: D?M+U, difference with `Drag` is that `TouchMoveStopTimeout` cannot be fired.
# 2-Finger-Pinch: distance changing
# 2-Finger-Drag: distance hold while moving
# where '?' after M means a little movement and '+' means a large one.
# other guestures are ignored.
# NOTE(review): this module is Python 2 (print statements, Queue module).

import re
import math
import time
import numpy as np
import subprocess
import threading
import Queue
import traceback

__all__ = ['AndroidInputHookManager', 'HookManager', 'HookConstants']

# global, max MultiTap count. Set to 1 to disable MultiTap, 0 for infinite.
_MULTI_TAP_NUM = 3

def set_multitap(count):
    # Module-level knob for how many consecutive taps may merge into one
    # MultiTap gesture; negative values are rejected.
    if count < 0:
        print 'Cannot set to negative count.'
        return
    global _MULTI_TAP_NUM
    _MULTI_TAP_NUM = int(count)

class HookConstants:
    # Bit 3 marks touch events; low bits distinguish the kind.
    # basic events
    TOUCH_ANY = 1 << 3
    TOUCH_DOWN = 1 << 3 ^ 1
    TOUCH_UP = 1 << 3 ^ 2
    TOUCH_MOVE = 1 << 3 ^ 3
    # only used for gesture analyze
    TOUCH_PRESS_TIMEOUT = 1 << 3 ^ 4
    TOUCH_FOLLOW_TIMEOUT = 1 << 3 ^ 5
    TOUCH_MOVESTOP_TIMEOUT = 1 << 3 ^ 6

    # Bit 4 marks key events.
    # DOWN is odd, UP is even & DONW + 1 == UP
    KEY_ANY = 1 << 4
    KEY_HOME_DOWN = 1 << 4 ^ 1
    KEY_HOME_UP = 1 << 4 ^ 2
    KEY_BACK_DOWN = 1 << 4 ^ 3
    KEY_BACK_UP = 1 << 4 ^ 4
    KEY_MENU_DOWN = 1 << 4 ^ 5
    KEY_MENU_UP = 1 << 4 ^ 6
    KEY_POWER_DOWN = 1 << 4 ^ 7
    KEY_POWER_UP = 1 << 4 ^ 8
    KEY_VOLUMEDOWN_DOWN = 1 << 4 ^ 9
    KEY_VOLUMEDOWN_UP = 1 << 4 ^ 10
    KEY_VOLUMEUP_DOWN = 1 << 4 ^ 11
    KEY_VOLUMEUP_UP = 1 << 4 ^ 12

    # Bit 5 marks recognized gestures.
    # gestures
    GST_TAP = 1 << 5 ^ 1
    GST_MULTI_TAP = 1 << 5 ^ 2
    GST_LONG_PRESS = 1 << 5 ^ 3
    GST_LONG_PRESS_RELEASE = 1 << 5 ^ 4
    GST_DRAG = 1 << 5 ^ 5
    GST_SWIPE = 1 << 5 ^ 6
    GST_PINCH_IN = 1 << 5 ^ 7
    GST_PINCH_OUT = 1 << 5 ^ 8

HC = HookConstants

# Single-letter / short names used when printing event sequences and when
# matching gesture regexes (see RegexpGestureRecognizer).
HCREPR = {
    HC.TOUCH_DOWN : 'D',
    HC.TOUCH_UP : 'U',
    HC.TOUCH_MOVE : 'M',
    HC.TOUCH_PRESS_TIMEOUT : 'P',
    HC.TOUCH_FOLLOW_TIMEOUT : 'F',
    HC.TOUCH_MOVESTOP_TIMEOUT : 'S',
    HC.GST_TAP: 'Tap',
    HC.GST_MULTI_TAP: 'MultiTap',
    HC.GST_LONG_PRESS: 'LongPress',
    HC.GST_LONG_PRESS_RELEASE: 'PressRelease',
    HC.GST_DRAG: 'Drag',
    HC.GST_SWIPE: 'Swipe',
    HC.GST_PINCH_IN: 'PinchIn',
    HC.GST_PINCH_OUT: 'PinchOut',
}

class Event(object):
    # Base event: a timestamp plus an HC.* message code.
    def __init__(self, time, msg):
        self.time = time
        self.msg = msg

    def __str__(self):
        return '%s_%s' % (self.__class__.__name__, HCREPR.get(self.msg, self.msg))

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join(['%s=%s' % (k, v) for k, v in self.__dict__.iteritems()]))

class KeyEvent(Event):
    def __init__(self, time, msg, key):
        super(KeyEvent, self).__init__(time, msg)
        # convert to KEYCODE_xxx for 'adb input keyevent xxx'
        if key.startswith('KEY_'):
            key = 'KEYCODE_' + key[4:]
        self.key = key

class TouchEvent(Event):
    def __init__(self, time, msg, slotid, x, y, pressure, touch_major, **extra):
        super(TouchEvent, self).__init__(time, msg)
        self.slotid = slotid  # multi-touch slot index (finger)
        self.x = x
        self.y = y
        self.pressure = pressure
        self.touch_major = touch_major
        # extra carries e.g. angle/velocity for move events
        self.__dict__.update(extra)

class TouchTimeoutEvent(Event):
    # Synthetic event generated by GestureRecognizer.process when a slot
    # has been quiet longer than one of the configured delays.
    def __init__(self, time, msg, slotid):
        super(TouchTimeoutEvent, self).__init__(time, msg)
        self.slotid = slotid

class GestureEvent(Event):
    def __init__(self, msg, track):
        # suffixes: s for start, e for end.
        # two-finger guestures need two tracks
        if msg in (HC.GST_PINCH_IN, HC.GST_PINCH_OUT):
            t1, t2 = track[0], track[1]
            ts = min(t1[0].time, t2[0].time)
            te = max(t1[-1].time, t2[-1].time)
        else:
            es, ee = track[0], track[-1]
            ts, te = track[0].time, track[-1].time
            print 'Gesture', HCREPR.get(msg, msg), \
                ''.join([HCREPR.get(e.msg, e.msg) for e in track]), \
                (es.x, es.y), (ee.x, ee.y)
            if msg in (HC.GST_SWIPE, HC.GST_DRAG):
                # TODO: check for corners for complicated trace
                self.points = [(es.x, es.y), (ee.x, ee.y)]
            else:
                self.points = [(es.x, es.y), (ee.x, ee.y)]
        super(GestureEvent, self).__init__(ts, msg)
        self.duration = te - ts

SLOT_NUM = 5  # max tracked fingers
# Column indices into the per-slot status matrix below.
_X, _Y, _VR, _VA, _MJ, _PR, FIELD_NUM = range(7)
INF = 9999

class InputParser(object):
    # Parses `adb shell getevent -lt` lines into Key/Touch events.
    _pat = re.compile('\[\s*(?P<time>[0-9.]+)\] (?P<device>/dev/.*): +(?P<type>\w+) +(?P<code>\w+) +(?P<value>\w+)')
    _move_radius = 10  # minimum pixel distance before a MOVE is emitted

    def __init__(self, queue):
        self.timediff = None  # device-time -> host-time offset, set lazily
        self.queue = queue
        # the 'standard' status temp_status is compared to.
        # if changes are great enough, new event are emitted.
        # velocity will be calculated for touch-move events.
        # Rows = slots (fingers), columns = fields (_X.._PR); -INF marks
        # an untouched/unknown slot.
        self._status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
        self._status_time = 0
        # realtime status, minor changes are cumulated
        self._temp_status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
        self._temp_status_time = 0
        self._touch_batch = []
        self._curr_slot = 0

    def feed(self, line):
        # Feed one raw getevent line; accumulates EV_ABS into a batch and
        # flushes it on EV_SYN.
        # print line
        m = self._pat.search(line)
        if not m:
            return
        _time, _device, _type, _code, _value = m.groups()
        _time = float(_time)
        if self.timediff is None:
            self.timediff = time.time() - _time
        _time = self.timediff + _time
        try:
            _value = int(_value, 16)
        except:
            # value may be a symbolic name (e.g. key codes); keep the string
            pass

        if _type == 'EV_SYN':
            if _code in ('SYN_REPORT', 'SYN_MT_REPORT'):
                try:
                    self._process_touch_batch()
                except IndexError:
                    # there might be a 6th finger, ignore that.
                    self._touch_batch = []
            elif _code == 'SYN_DROPPED':
                self._touch_batch = []
            else:
                # print 'unknown syn code', _code
                pass
        elif _type == 'EV_KEY':
            self.emit_key_event(_time, _code, _value)
        elif _type == 'EV_ABS':
            self._touch_batch.append((_time, _device, _type, _code, _value))
        else:
            # print 'unknown input event type', _type
            pass

    def emit_key_event(self, _time, _code, _value):
        # '%s_%s' builds names like 'KEY_BACK_DOWN' that map onto HC.*
        name = '%s_%s' % (_code, _value)
        msg = getattr(HC, name, None)
        if msg is None:
            return
        event = KeyEvent(_time, msg, _code)
        self.queue.put(event)

    def emit_touch_event(self, event):
        self.queue.put(event)

    def _process_touch_batch(self):
        '''a batch syncs in about 0.001 seconds.'''
        if not self._touch_batch:
            return

        _time = self._temp_status_time
        changed = False
        for (_time, _device, _type, _code, _value) in self._touch_batch:
            if _code == 'ABS_MT_TRACKING_ID':
                if _value == 0xffffffff:
                    # tracking id -1: the finger in this slot lifted
                    self._temp_status[self._curr_slot] = -INF
                    changed = True
                else:
                    pass
            elif _code == 'ABS_MT_SLOT':
                self._curr_slot = _value
            else:
                if _code == 'ABS_MT_POSITION_X':
                    self._temp_status[self._curr_slot,_X] = _value
                    changed = True
                elif _code == 'ABS_MT_POSITION_Y':
                    self._temp_status[self._curr_slot,_Y] = _value
                    changed = True
                elif _code == 'ABS_MT_PRESSURE':
                    self._temp_status[self._curr_slot,_PR] = _value
                elif _code == 'ABS_MT_TOUCH_MAJOR':
                    self._temp_status[self._curr_slot,_MJ] = _value
                else:
                    print 'Unknown code', _code
        self._temp_status_time = _time
        self._touch_batch = []
        if not changed:
            return

        # check differences, if position changes are big enough then emit events
        diff = self._temp_status - self._status
        dt = self._temp_status_time - self._status_time
        emitted = False
        for i in range(SLOT_NUM):
            arr = self._temp_status[i]
            oldarr = self._status[i]
            dx, dy = diff[i,_X], diff[i,_Y]
            if dx > INF or dy > INF:
                # slot went from -INF to a real coordinate: touch begin
                event = TouchEvent(_time, HC.TOUCH_DOWN, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ])
                self.emit_touch_event(event)
                emitted = True
            elif dx < -INF or dy < -INF:
                # slot went back to -INF: touch end (report last known pos)
                event = TouchEvent(_time, HC.TOUCH_UP, i, oldarr[_X], oldarr[_Y], oldarr[_PR], oldarr[_MJ])
                self.emit_touch_event(event)
                emitted = True
            else:
                r, a = radang(float(dx), float(dy))
                if r > self._move_radius:
                    v = r / dt
                    event = TouchEvent(_time, HC.TOUCH_MOVE, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ],
                                       angle=a, velocity=v)
                    self.emit_touch_event(event)
                    emitted = True
        if not emitted:
            return
        self._status = self._temp_status.copy()
        self._status_time = self._temp_status_time

def radang(x, y):
    '''return (radius, angle) of a vector(x, y)'''
    # Angle is in degrees, measured counter-clockwise in [0, 360).
    if x == 0:
        if y == 0:
            return 0, 0
        return abs(y), 90+180*(y<0)
    if y == 0:
        return abs(x), 180*(x<0)

    r = math.sqrt(x*x+y*y)
    a = math.degrees(math.atan(y/x))
    # atan only covers (-90, 90); shift into the correct quadrant.
    if x < 0:
        a += 180
    elif y < 0:
        a += 360
    return r, a

class GestureRecognizer(object):
    # Thresholds (seconds) for the synthetic timeout events.
    double_tap_delay = 0.5
    long_press_delay = 1
    move_stop_delay = 0.2
    pinch_difference_square = 3000

    def __init__(self, queue):
        self.queue = queue
        self.dispatch_map = {}  # HC.* code -> user callback
        self.running = False
        self.touches = [None] * SLOT_NUM  # last touch event per slot, for timeouts
        # used for recognition
        self.tracks = [None for i in range(SLOT_NUM)]
        self.track_slots = set()

    def register(self, keycode, func):
        self.dispatch_map[keycode] = func

    def start(self):
        if self.running:
            return
        self.running = True
        t = threading.Thread(target=self.process)
        t.setDaemon(True)
        t.start()

    def stop(self):
        self.running = False
    def process(self):
        '''handle events and trigger time-related events'''
        # Drains the queue; when idle, checks each slot's last event and
        # fires the press/follow/move-stop timeouts.
        timediff = 0
        while True:
            try:
                time.sleep(0.001)
                event = self.queue.get_nowait()
                self.handle_event(event)
                if event.msg & HC.KEY_ANY:
                    continue
                if timediff == 0:
                    timediff = time.time() - event.time
                self.touches[event.slotid] = event
            except Queue.Empty:
                if not self.running:
                    break
                now = time.time() - timediff
                for i in range(SLOT_NUM):
                    e = self.touches[i]
                    if e is None:
                        continue
                    if e.msg == HC.TOUCH_DOWN and now - e.time > self.long_press_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_PRESS_TIMEOUT, i))
                        self.touches[i] = None
                    elif e.msg == HC.TOUCH_UP and now - e.time > self.double_tap_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_FOLLOW_TIMEOUT, i))
                        self.touches[i] = None
                    elif e.msg == HC.TOUCH_MOVE and now - e.time > self.move_stop_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_MOVESTOP_TIMEOUT, i))
                        self.touches[i] = None
            except:
                traceback.print_exc()
        print 'process done.'

    def handle_event(self, event):
        # Fan out to the exact-code callback, then to the ANY-class
        # callback; only touch events feed gesture analysis.
        self.dispatch_event(event.msg, event)
        if event.msg & HC.KEY_ANY:
            self.dispatch_event(HC.KEY_ANY, event)
        else:
            self.dispatch_event(HC.TOUCH_ANY, event)
            self.analyze_tracks(event)

    def dispatch_event(self, msg, event):
        func = self.dispatch_map.get(msg)
        if func is None:
            return
        try:
            func(event)
        except:
            # user callback errors must not kill the processing thread
            traceback.print_exc()

    def analyze_tracks(self, event):
        # overridden by subclasses
        pass

    def handle_gesture(self, msg, tracks):
        event = GestureEvent(msg, tracks)
        func = self.dispatch_map.get(msg)
        if func is None:
            return
        try:
            func(event)
        except:
            traceback.print_exc()

## NOT COMPLEMENTED ##
class SimpleGestureRecognizer(GestureRecognizer):
    N_FINGER = 2

    def analyze_tracks(self, event):
        # handle one-finger and two-finger gestures only
        # means a third finger will be ignored even if one of the
        # first two fingers leaves the screen.
        i = event.slotid
        # begin guesture when touch down
        if event.msg == HC.TOUCH_DOWN:
            if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
                return
            if self.tracks[i] is None:
                self.tracks[i] = []
                self.track_slots.add(i)
            self.tracks[i].append(event)
            return
        if self.tracks[i] is None:
            return
        if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
            # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'long press'
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
            # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'drag'
            self.tracks[i] = []
            if len(self.track_slots) == 2:
                for s in self.track_slots:
                    print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
                print
        elif event.msg == HC.TOUCH_UP:
            self.tracks[i].append(event)
            if len(self.track_slots) == 2:
                for s in self.track_slots:
                    print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
                print
            self.tracks[i] = None
            self.track_slots.discard(i)
        else:
            # TOUCH_MOVE: just extend the track
            self.tracks[i].append(event)
            return

        # check for pinch/pan
        # NOTE(review): class is marked "NOT COMPLEMENTED"; the exact branch
        # nesting here is reconstructed from a flattened source — verify.
        if len(self.track_slots) == 2:
            t1, t2 = [self.tracks[s] for s in self.track_slots]
            if len(t1) == 0 or len(t2) == 0 or len(t1) + len(t2) < 6:
                return
            # make copy and check distance changing
            t1, t2 = t1[:], t2[:]
            dists = []
            while len(dists) < 5:
                e1, e2 = t1[-1], t2[-1]
                dx, dy = e1.x-e2.x, e1.y-e2.y
                dists.append(dx*dx+dy*dy)
                # pop the younger tail so the pair stays time-aligned
                if e1.time < e2.time:
                    if len(t2) == 1:
                        break
                    else:
                        t2.pop()
                else:
                    if len(t1) == 1:
                        break
                    else:
                        t1.pop()
            print [dists[j+1]-dists[j] for j in range(len(dists)-1)]
            # just keep latest position
            for s in self.track_slots:
                self.tracks[s] = self.tracks[s][-1:]

class RegexpGestureRecognizer(GestureRecognizer):
    N_FINGER = 1

    def analyze_tracks(self, event):
        # handle one-finger gestures only
        i = event.slotid
        # begin guesture when touch down
        if event.msg == HC.TOUCH_DOWN:
            if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
                return
            if not self.tracks[i]:
                self.tracks[i] = []
                self.track_slots.add(i)
            self.tracks[i].append(event)
            return
        if self.tracks[i] is None:
            return

        # Encode the track as a string of D/U/M letters and match gesture
        # patterns against it (see module header for the grammar).
        s = ''.join([HCREPR.get(e.msg) for e in self.tracks[i]])
        if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
            if re.match('^DM?U$', s):
                self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
            elif re.match('^(DM?U)+DM?U$', s):
                self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
            self.tracks[i] = None
            self.track_slots.discard(i)
        elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
            if re.match('^D?MM+$', s):
                self.handle_gesture(HC.GST_DRAG, self.tracks[i][:])
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
            if s == 'D':
                self.handle_gesture(HC.GST_LONG_PRESS, self.tracks[i][:])
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_UP:
            self.tracks[i].append(event)
            # note: it's not the same with s after add
            if s == '':
                # track was reset by a long-press timeout: this UP releases it
                self.handle_gesture(HC.GST_LONG_PRESS_RELEASE, [event])
            elif re.match('^D?MM+$', s):
                self.handle_gesture(HC.GST_SWIPE, self.tracks[i][:])
                self.tracks[i] = []
            elif _MULTI_TAP_NUM == 1 and re.match('^DM?$', s):
                self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
                self.tracks[i] = []
            elif _MULTI_TAP_NUM > 1 and re.match('^(DM?U){%d}DM?$' % (_MULTI_TAP_NUM-1,), s):
                # cap reached: flush the accumulated taps as one MultiTap
                self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
                self.tracks[i] = []
        elif event.msg == HC.TOUCH_MOVE:
            if re.match('^(DU)+D$', s):
                # a new press after completed tap(s): emit them before the move
                if s == 'DUD':
                    self.handle_gesture(HC.GST_TAP, self.tracks[i][:-1])
                else:
                    self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:-1])
                self.tracks[i] = self.tracks[i][-1:]
            self.tracks[i].append(event)

# States for the state-machine recognizer below.
NOTACTIVE, ACTIVE, STAGE_1, STAGE_2, TRIGGERED = range(5)

## NOT COMPLEMENTED ##
class StateMachineGestureRecognizer(GestureRecognizer):
    # gesture -> state -> event -> next state
    state_map = {
        HC.GST_TAP: {
            NOTACTIVE: { HC.TOUCH_DOWN : ACTIVE },
            ACTIVE: {
                HC.TOUCH_MOVE: STAGE_1,
                HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
                HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
            },
            STAGE_1: {
                HC.TOUCH_MOVE: NOTACTIVE,
                HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
                HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
            }
        },
        HC.GST_SWIPE: {
            NOTACTIVE: { HC.TOUCH_DOWN: ACTIVE },
            ACTIVE: {
                HC.TOUCH_UP: NOTACTIVE,
                HC.TOUCH_MOVE: STAGE_1},
            STAGE_1: {
                HC.TOUCH_UP: NOTACTIVE,
                HC.TOUCH_MOVE: STAGE_2 },
            STAGE_2: {
                HC.TOUCH_UP: TRIGGERED,
                HC.TOUCH_MOVESTOP_TIMEOUT: TRIGGERED},
        },
    }

    def __init__(self, queue):
        super(self.__class__, self).__init__(queue)
        self.state = {}
        for k in self.state_map:
            self.state[k] = NOTACTIVE
        print self.state_map

    def analyze_tracks(self, event):
        # advance every gesture's state machine on this event
        for k, v in self.state.iteritems():
            s = self.state_map.get(k, {}).get(v, {}).get(event.msg)
            if s is not None:
                self.state[k] = s
        triggered = False
        for k, v in self.state.iteritems():
            if v == TRIGGERED:
                print 'trigger event', k
                triggered = True
        if triggered:
            # any trigger resets all machines
            for k in self.state:
                self.state[k] = NOTACTIVE

class AndroidInputHookManager(object):

    def __init__(self, serial=None, processor_class=RegexpGestureRecognizer):
        self._serial = serial
        self.running = False
        self._queue = Queue.Queue()
        self._listener = None
        self._parser = InputParser(self._queue)
        self._processor = processor_class(self._queue)

    def set_serial(self, serial):
        self._serial = serial

    def register(self, keycode, func):
        '''register hook function'''
        self._processor.register(keycode, func)

    def hook(self):
        # start the recognizer thread, then the adb reader thread
        self._processor.start()
        self.running = True
        t = threading.Thread(target=self._run_hook)
        t.setDaemon(True)
        t.start()

    def _run_hook(self):
        cmd = ['adb']
        if self._serial:
            cmd.extend(['-s', self._serial])
        cmd.extend(['shell', 'getevent', '-lt'])

        while True:
            # start listener
            self._listener = p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                                  stderr=subprocess.STDOUT)
            while True:
                try:
                    line = p.stdout.readline().strip()
                    if not line:
                        if p.poll() is not None:
                            break
                        continue
                    self._parser.feed(line)
                except KeyboardInterrupt:
                    p.kill()
                except:
                    p.kill()
                    traceback.print_exc()

            if not self.running:
                break

            # getevent exited on its own: reconnect only if the device is
            # still reachable.
            state = subprocess.check_output(['adb', '-s', self._serial, 'get-state']).strip()
            if state != 'device':
                print 'adb status(%s) wrong! stop hook.' % (state,)
                break
            print 'adb getevent died, reconnecting...'
            time.sleep(1)

    def unhook(self):
        self.running = False
        self._processor.stop()
        if self._listener:
            self._listener.kill()

HookManager = AndroidInputHookManager

if __name__ == '__main__':
    hm = AndroidInputHookManager(processor_class=RegexpGestureRecognizer)
    hm.hook()
    while True:
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            break
    hm.unhook()
start.py
import time
import main
from multiprocessing import Process


def _run_workers(count=10):
    """Start `count` worker processes running main.main(i) and join them all.

    Workers are all started before any join, so they run concurrently.
    """
    workers = []
    for i in range(count):
        proc = Process(target=main.main, args=(i,))
        proc.start()
        workers.append(proc)
    for proc in workers:
        proc.join()


# BUG FIX: the spawning loop previously ran at module import time with no
# __main__ guard. Under the 'spawn' start method (Windows, macOS default)
# each child re-imports this module, which would re-execute the loop and
# fork-bomb the machine. multiprocessing requires the guard.
if __name__ == '__main__':
    _run_workers()
proxyonly.py
import os import sys import time import json import codecs import shutil import threading from datetime import datetime, date if getattr(sys, 'frozen', False): rootdir = os.path.dirname(sys.executable) else: rootdir = os.path.dirname(os.path.realpath(__file__)) # make all filenames based on rootdir being unicode rootdir = rootdir.decode(sys.getfilesystemencoding()) sys.path.append(rootdir) from lib.utils import init_logging, local_update_datafile, set_ca_certs_env, singleton_check, singleton_clean from lib.ipc import ActorObject from component.admin import Admin from component.circumvention import CircumventionChannel, remote_update_meek_relays from component.local import HTTPProxy, SocksProxy from component.matcher import create_matcher, blacklist_info, remote_update_blacklist from component.brz import able_to_setproxy from component.hosts import hosts_info, remote_update_hosts class Coordinator(ActorObject): def __init__(self, rootdir, conf_file): super(Coordinator, self).__init__() self.rootdir = rootdir self.conf_file = conf_file self.confdata = None self.admin = None self.cc_channel = None self.matcher = None self.http_proxy = None self.socks_proxy = None def loadconf(self): f = codecs.open(os.path.join(self.rootdir, self.conf_file), "r", "utf-8") self.confdata = json.loads(f.read()) f.close() def backup_conf(self): conf = os.path.join(self.rootdir, self.conf_file) shutil.copy(conf, conf + ".last") default = conf + ".default" if not os.path.isfile(default): shutil.copy(conf, default) def recover_conf(self): conf = os.path.join(self.rootdir, self.conf_file) shutil.copy(conf + ".last", conf) def initialize(self): self.singleton = singleton_check(self.rootdir) if not self.singleton: sys.exit(-1) self.loadconf() self.ref().share('rootdir', self.rootdir) self.ref().share('confdata', self.confdata) self.start_actor() def start_admin(self): self.admin = Admin(self.ref()) self.admin.start() def start_cc_channel(self): try: self.cc_channel = 
CircumventionChannel(self.ref()) self.cc_channel.start() except Exception, e: print "failed to start circumvention channel: %s" % str(e) def start_local_proxy(self): global rootdir circumvention_url = self.IPC_circumvention_url() self.matcher = create_matcher(rootdir, self.confdata, circumvention_url) if self.confdata['enable_http_proxy']: try: self.http_proxy = HTTPProxy(self.ref(), self.matcher) self.http_proxy.start() except Exception, e: print "failed to start http proxy: %s" % str(e) if self.confdata['enable_socks_proxy']: try: self.socks_proxy = SocksProxy(self.ref(), self.matcher) self.socks_proxy.start() except Exception, e: print "failed to start socks proxy: %s" % str(e) def proxy_info(self): if self.socks_proxy: # ip, port = self.socks_proxy.ref().IPC_addr() # return ProxyInfo(socks.PROXY_TYPE_SOCKS5, ip, port, True, None, None) url = self.socks_proxy.ref().IPC_url() return {'http': url, 'https': url} elif self.http_proxy: # ip, port = self.http_proxy.ref().IPC_addr() # return ProxyInfo(socks.PROXY_TYPE_HTTP, ip, port, True, None, None) url = self.http_proxy.ref().IPC_url() return {'http': url, 'https': url} else: # return None return {} def update_matcher(self): circumvention_url = self.IPC_circumvention_url() self.matcher = create_matcher(rootdir, self.confdata, circumvention_url) if self.http_proxy: self.http_proxy.ref().IPC_update_matcher(self.matcher) if self.socks_proxy: self.socks_proxy.ref().IPC_update_matcher(self.matcher) def check_and_update_blacklist(self): try: blacklist_date = datetime.strptime(self.matcher.blacklist_matcher.meta['date'], '%Y-%m-%d').date() if date.today() > blacklist_date: updated = remote_update_blacklist(self.proxy_info(), self.rootdir, self.confdata) if updated: self.update_matcher() except Exception, e: print "failed to update blacklist: %s" % str(e) def check_and_update_hosts(self): try: hosts_date = datetime.strptime(self.matcher.hosts.meta['date'], '%Y-%m-%d').date() if date.today() > hosts_date: updated = 
remote_update_hosts(self.proxy_info(), self.rootdir, self.confdata) if updated: self.update_matcher() except Exception, e: print "failed to update hosts: %s" % str(e) def update_meek_relays(self): try: updated = remote_update_meek_relays(self.proxy_info(), self.rootdir, self.confdata) if updated: self.cc_channel.ref().IPC_update_meek_relays() except Exception, e: print "failed to update meek relays: %s" % str(e) def check_for_update(self): time.sleep(20) if self.cc_channel.type == "meek": self.update_meek_relays() self.check_and_update_blacklist() self.check_and_update_hosts() def run(self): try: self.initialize() self.start_cc_channel() self.start_admin() self.start_local_proxy() except Exception, e: print "failed to start basic steps/processes: %s, try to recover ..." % str(e) if not self.recover_conf(): raise e self.end() self.initialize() self.start_cc_channel() self.start_admin() self.start_local_proxy() self.backup_conf() t = threading.Thread(target=self.check_for_update) t.daemon = True t.start() def end(self): if self.admin: self.admin.terminate() self.admin.join() if self.cc_channel: self.cc_channel.terminate() self.cc_channel.join() if self.http_proxy: self.http_proxy.terminate() self.http_proxy.join() if self.socks_proxy: self.socks_proxy.terminate() self.socks_proxy.join() singleton_clean(self.rootdir, self.singleton) # IPC interfaces def IPC_circumvention_url(self): """ask circumvention channel for forwarding url""" return self.cc_channel.ref().IPC_url() def IPC_socks_proxy_addr(self): return self.socks_proxy.ref().IPC_addr() def IPC_http_proxy_addr(self): return self.http_proxy.ref().IPC_addr() def IPC_shadowsocks_methods(self): return self.cc_channel.ref().IPC_shadowsocks_methods() def IPC_blacklist_info(self): return blacklist_info(self.rootdir, self.confdata, self.matcher.blacklist_matcher) def IPC_hosts_info(self): return hosts_info(self.rootdir, self.confdata, self.matcher.hosts) def IPC_get_custom_blacklist(self): return 
self.matcher.blacklist_matcher.get_custom_blacklist() def IPC_get_custom_whitelist(self): return self.matcher.blacklist_matcher.get_custom_whitelist() def IPC_update_config(self, data): try: self.confdata.update(data) f = codecs.open(os.path.join(self.rootdir, self.conf_file), "w", "utf-8") f.write(json.dumps(self.confdata, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)) f.close() return data except Exception, e: print "failed to update config: %s" % str(e) return None def IPC_resume_default_config(self): conf = os.path.join(self.rootdir, self.conf_file) shutil.copy(conf + ".default", conf) self.loadconf() return self.confdata def IPC_update_blacklist(self): try: updated = remote_update_blacklist(self.proxy_info(), self.rootdir, self.confdata) if updated: self.update_matcher() return True except Exception, e: print "failed to update blacklist: %s" % str(e) return False def IPC_update_custom_list(self, custom_bl=None, custom_wl=None): if custom_bl: local_update_datafile(u"\n".join(custom_bl), os.path.join(self.rootdir, self.confdata['custom_blacklist'])) if custom_wl: local_update_datafile(u"\n".join(custom_wl), os.path.join(self.rootdir, self.confdata['custom_whitelist'])) self.update_matcher() def IPC_update_hosts(self): try: updated = remote_update_hosts(self.proxy_info(), self.rootdir, self.confdata) if updated: self.update_matcher() return True except Exception, e: print "failed to update hosts: %s" % str(e) return False def IPC_update_hosts_disabled(self, disabled): local_update_datafile(u"\n".join(disabled), os.path.join(self.rootdir, self.confdata['hosts']['disabled'])) self.update_matcher() def IPC_support_ssh(self): return self.cc_channel.ref().IPC_support_ssh() def IPC_setproxy_tip(self): if not self.confdata['launch_browser']: return False return not able_to_setproxy() def close_std(): sys.stdin.close() sys.stdin = open(os.devnull) sys.stderr.close sys.stderr = open(os.devnull) def main(): init_logging() global rootdir conf_file = 
"config.json" set_ca_certs_env(os.path.join(rootdir, "cacert.pem").encode(sys.getfilesystemencoding())) coordinator = Coordinator(rootdir, conf_file) coordinator.run() try: while True: time.sleep(10) except: print "quit ..." coordinator.end() if __name__ == '__main__': main()
fatigue_concurrent_client.py
import base64
import os
import time
# BUG FIX: "random" was never imported at module level, yet consumer()
# calls random.randint() — enabling the consumer processes would have
# raised NameError.
import random
from concurrent import futures
import threading
import argparse
import sys
import datetime

import requests, json, numpy as np
from multiprocessing import Process, Queue, Lock

from google.protobuf.timestamp_pb2 import Timestamp

import grpc
#from clipper_admin.grpcclient import grpc_client
from clipper_admin.rpc import (management_pb2, management_pb2_grpc,
                               model_pb2, model_pb2_grpc,
                               prediction_pb2, prediction_pb2_grpc)

import logging
logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-9s) %(message)s',)

"""
python imagequery_concurrent_client.py --worker 1 --ip 172.18.0.1 --port 22223 --system oursystem
"""


def oursystem(ip, port, inputt):
    """Send one request through our proxy server via gRPC; return its status."""
    channel = grpc.insecure_channel('%s:%s'%(ip, port))
    stub = prediction_pb2_grpc.ProxyServerStub(channel)
    response = stub.downstream(prediction_pb2.request(
        input_=model_pb2.input(inputType='string', inputStream=inputt)))
    return response.status


def withoutproxy(ip, port, inputt):
    """Query the model container directly (no proxy); return its output stream."""
    channel = grpc.insecure_channel('%s:%s'%(ip, port))
    stub = model_pb2_grpc.PredictServiceStub(channel)
    response = stub.Predict(
        model_pb2.input(inputStream=inputt, inputType="String"))
    return response.outputStream


def clipper(ip, port, inputt):
    """POST one prediction to a Clipper REST frontend.

    The JSON response body is ignored; a fixed marker string is returned.
    """
    headers = {"Content-type": "application/json"}
    requests.post("http://{}:{}/hello-world/predict".format(ip,port),
                  headers=headers,
                  data=json.dumps({"input": [inputt]})).json()
    return "Raft OK"


def bigball(ip, port, inputt):
    # Better to use clipper to process bigball container.
    return "bigball"


# Producer function that places data on the Queue
def producer(queue, lock, ip, port, inputt_list, func):
    """Worker process body: send every input in *inputt_list* using *func*."""
    # Synchronize access to the console
    with lock:
        print('Starting worker => {}'.format(os.getpid()))

    # Query and return output on the Queue
    for inputt in inputt_list:
        #time.sleep(random.randint(0, 10))
        output = func(ip, port, inputt)
        #queue.put(output)
        with lock:
            print("Input {} returns Output: {}".format(inputt, output))

    # Synchronize access to the console
    with lock:
        print('Worker {} exiting...'.format(os.getpid()))


# Currently no need
# The consumer function takes data off of the Queue
def consumer(queue, lock):
    """Drain results from the queue forever (disabled in main())."""
    # Synchronize access to the console
    with lock:
        print('Starting consumer => {}'.format(os.getpid()))

    # Run indefinitely
    while True:
        time.sleep(random.randint(0, 2))

        # If the queue is empty, queue.get() will block until the queue has data
        output = queue.get()

        # Synchronize access to the console
        with lock:
            print('{} got {}'.format(os.getpid(), output))


def main():
    """Parse CLI options, fan the input list out over worker processes, time it."""
    parser = argparse.ArgumentParser(description='concurrent client')
    parser.add_argument('--worker', nargs=1, type=int, help="Worker num")
    parser.add_argument('--ip', nargs=1, type=str,
                        help="Ip address of your query frontend")
    parser.add_argument('--port', nargs=1, type=str,
                        help="Port of your query frontend, for Clipper, put an arbitrary INT")
    parser.add_argument('--system', nargs=1, type=str,
                        help="System name: oursystem/withoutproxy/clipper")
    args = parser.parse_args()

    # Generate your inputt list here
    inputt_total = [str(i) for i in range(400,600)]
    # import random
    # random.shuffle(inputt_total)
    # inputt_total = [str(i) for i in inputt_total]
    # print("inputt_total: " , inputt_total)

    # Get configuration
    work_num = args.worker[0]
    ip = args.ip[0]
    port = args.port[0]
    system = args.system[0]

    # Create the Queue object
    queue = Queue()

    # Create a lock object to synchronize resource access
    lock = Lock()

    producers = []
    consumers = []

    # The query function (oursystem/withoutproxy/clipper/bigball) is looked
    # up by name on this module, driven by the --system flag.
    thismodule = sys.modules[__name__]

    for i in range(work_num):
        # Slice the input_total to $work_num lists
        inputt_list = inputt_total[i::work_num]
        # Create our producer processes by passing the producer function and it's arguments
        producers.append(Process(target=producer,
                                 args=(queue, lock, ip, port, inputt_list,
                                       getattr(thismodule, system))))

    # Create consumer processes
    #for i in range(work_num):
    #    p = Process(target=consumer, args=(queue, lock))
    #    # This is critical! The consumer function has an infinite loop
    #    # Which means it will never exit unless we set daemon to true
    #    p.daemon = True
    #    consumers.append(p)

    # Start the producers and consumer
    # The Python VM will launch new independent processes for each Process object
    start = time.time()
    for p in producers:
        p.start()
    #for c in consumers:
    #    c.start()

    # Like threading, we have a join() method that synchronizes our program
    for p in producers:
        p.join()
    end = time.time()

    print('Finished %d requests with time:'%(len(inputt_total)))
    print(end-start)
    print('Parent process exiting...')


if __name__ == '__main__':
    main()
test.py
import argparse
import logging
import subprocess
import threading
import time
import uuid

import yaml

from collections import Counter

import ovirtsdk4 as sdk
import ovirtsdk4.types as types

log = logging.getLogger("test")


class Timeout(Exception):
    pass


class JobError(Exception):
    pass


class Runner:
    """Runs one VM create/snapshot/remove stress loop against an oVirt engine.

    Each Runner owns one SDK connection and executes ``conf["iterations"]``
    iterations of: create VM -> start -> snapshot -> write data -> remove
    snapshot (the actual test) -> teardown. Pass/fail/error counts are
    accumulated on the instance for the module-level driver to collect.
    """

    def __init__(self, index, conf):
        self.index = index
        self.conf = conf
        self.connection = None
        self.iteration = None
        self.vm = None
        self.snapshot = None
        self.passed = 0
        self.failed = 0
        self.errored = 0

    def run(self):
        # Main loop: setup failures count as "errored", test failures as
        # "failed"; teardown always runs and its errors are only logged.
        log.info("Started")
        self.connect()
        for i in range(self.conf["iterations"]):
            self.iteration = i
            start = time.monotonic()
            log.info("Iteration %d started", i)
            try:
                self.setup()
                try:
                    self.test()
                    log.info("Iteration %d passed", i)
                    self.passed += 1
                except Exception:
                    log.exception("Iteration %d failed", i)
                    self.failed += 1
            except Exception:
                log.exception("Iteration %d errored", i)
                self.errored += 1
            finally:
                try:
                    self.teardown()
                except Exception:
                    log.exception("Error tearing down")
                log.info("Iteration %d completed in %d seconds",
                         i, time.monotonic() - start)
        self.disconnect()
        log.info("Finished")

    def setup(self):
        # Prepare one iteration: fresh VM with a snapshot and dirty data.
        self.vm = None
        self.snapshot = None
        self.check_data_center()
        self.create_vm()
        self.start_vm()
        self.create_snapshot()
        self.write_data()

    def test(self):
        # The operation under test: live removal of the snapshot.
        self.remove_snapshot()

    def teardown(self):
        if self.vm:
            self.check_data_center()
            self.stop_vm()
            self.remove_vm()

    def connect(self):
        self.connection = sdk.Connection(
            url="https://{}/ovirt-engine/api".format(self.conf["engine_fqdn"]),
            username=self.conf["engine_username"],
            password=self.conf["engine_password"],
            ca_file=self.conf["engine_cafile"]
        )

    def disconnect(self):
        self.connection.close()

    # Data center health

    def check_data_center(self):
        """Block until the data center of the configured cluster is UP.

        Raises Timeout after conf["data_center_up_timeout"] seconds.
        """
        log.info("Checking data center status for cluster %s",
                 self.conf["cluster_name"])
        start = time.monotonic()
        deadline = start + self.conf["data_center_up_timeout"]

        system_service = self.connection.system_service()
        clusters_service = system_service.clusters_service()
        # NOTE(review): assumes the search returns at least one cluster;
        # an unknown cluster_name would raise IndexError here.
        clusters = clusters_service.list(
            search="name={}".format(self.conf["cluster_name"]))
        cluster = clusters[0]

        data_centers_service = system_service.data_centers_service()
        data_center_service = data_centers_service.data_center_service(
            id=cluster.data_center.id)

        data_center = data_center_service.get()
        log.debug("Data center %s is %s", data_center.name, data_center.status)
        if data_center.status == types.DataCenterStatus.UP:
            return

        log.info("Data center %s is %s, waiting until it is up",
                 data_center.name, data_center.status)

        while True:
            time.sleep(self.conf["poll_interval"])
            data_center = data_center_service.get()
            log.debug("Data center %s is %s",
                      data_center.name, data_center.status)
            if data_center.status == types.DataCenterStatus.UP:
                break
            if time.monotonic() > deadline:
                raise Timeout(
                    "Timeout waiting until data center {} is up"
                    .format(data_center.name))

        log.info("Data center %s recovered in %d seconds",
                 data_center.name, time.monotonic() - start)

    # Modifying VMs.

    def create_vm(self):
        """Clone a VM from the configured template, pinned to conf["vm_host"]."""
        vm_name = "{}-{}-{}".format(
            self.conf["vm_name"], self.index, self.iteration)
        log.info("Creating vm %s", vm_name)
        start = time.monotonic()
        deadline = start + self.conf["create_vm_timeout"]

        vms_service = self.connection.system_service().vms_service()
        # Correlation id lets us poll engine jobs spawned by this request.
        correlation_id = str(uuid.uuid4())
        self.vm = vms_service.add(
            types.Vm(
                name=vm_name,
                cluster=types.Cluster(name=self.conf["cluster_name"]),
                template=types.Template(name=self.conf["template_name"]),
                placement_policy=types.VmPlacementPolicy(
                    hosts=[
                        types.Host(name=self.conf["vm_host"])
                    ]
                ),
            ),
            # Clone the VM to keep raw disks raw.
            clone=True,
            query={'correlation_id': correlation_id},
        )

        try:
            self.wait_for_jobs(correlation_id, deadline)
        except JobError:
            # The add failed on the engine side; there is no VM to manage.
            self.vm = None
            raise

        log.info("VM %s created in %d seconds",
                 self.vm.name, time.monotonic() - start)

    def start_vm(self):
        log.info("Starting vm %s", self.vm.name)
        start = time.monotonic()
        deadline = start + self.conf["start_vm_timeout"]
        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        vm_service.start()
        self.wait_for_vm_status(types.VmStatus.UP, deadline)
        log.info("VM %s started in %d seconds",
                 self.vm.name, time.monotonic() - start)

    def stop_vm(self):
        log.info("Stopping vm %s", self.vm.name)
        start = time.monotonic()
        deadline = start + self.conf["stop_vm_timeout"]

        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)

        vm = vm_service.get()
        if vm.status == types.VmStatus.IMAGE_LOCKED:
            # A locked image means an operation is in flight; just wait for
            # the VM to settle into DOWN instead of issuing a stop.
            self.wait_for_vm_status(types.VmStatus.DOWN, deadline)
        elif vm.status != types.VmStatus.DOWN:
            self.try_to_stop_vm(deadline)

        log.info("VM %s stopped in %d seconds",
                 self.vm.name, time.monotonic() - start)

    def try_to_stop_vm(self, deadline):
        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)

        # Testing shows that if a vm is in WAIT_FOR_LUNCH state,
        # stopping it does nothing. To handle all possible cases, lets
        # repeat the stop request and check the status until the VM is
        # DOWN or the timeout expires.
        while True:
            try:
                vm_service.stop()
            except sdk.Error as e:
                # Stop may legitimately fail while a state change is in
                # progress; retry on the next poll.
                log.warning("Error stopping vm %s: %s", self.vm.name, e)
            time.sleep(self.conf["poll_interval"])
            vm = vm_service.get()
            log.debug("VM %s is %s", self.vm.name, vm.status)
            if vm.status == types.VmStatus.DOWN:
                break
            if time.monotonic() > deadline:
                raise Timeout(
                    "Timeout stopping vm {}".format(self.vm.name))

    def remove_vm(self):
        """Remove the VM and poll until the engine reports it gone (404)."""
        log.info("Removing vm %s", self.vm.name)
        start = time.monotonic()
        deadline = start + self.conf["remove_vm_timeout"]

        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        vm_service.remove()

        while True:
            if time.monotonic() > deadline:
                raise Timeout("Timeout removing vm {}".format(self.vm.name))
            time.sleep(self.conf["poll_interval"])
            try:
                vm = vm_service.get()
            except sdk.NotFoundError:
                # NotFound is the success condition: the VM is gone.
                break
            except sdk.Error as e:
                log.warning("Error polling vm %s status, retrying: %s",
                            self.vm.name, e)
                continue
            log.debug("VM %s status: %s", self.vm.name, vm.status)

        log.info("VM %s removed in %d seconds",
                 self.vm.name, time.monotonic() - start)
        self.vm = None

    def wait_for_vm_status(self, status, deadline):
        log.info("Waiting until vm %s is %s", self.vm.name, status)
        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        while True:
            if time.monotonic() > deadline:
                raise Timeout(
                    "Timeout waiting until vm {} is {}"
                    .format(self.vm.name, status))
            time.sleep(self.conf["poll_interval"])
            try:
                vm = vm_service.get()
            except sdk.NotFoundError:
                # Adding vm failed.
                self.vm = None
                raise
            except sdk.Error as e:
                log.warning("Error polling vm, retrying: %s", e)
                continue
            if vm.status == status:
                break
            log.debug("VM %s status: %s", self.vm.name, vm.status)

    # Modifying snapshots.

    def create_snapshot(self):
        """Create a diskless-memory snapshot and wait for its jobs to finish."""
        log.info("Creating snapshot for vm %s", self.vm.name)
        start = time.monotonic()
        deadline = start + self.conf["create_snapshot_timeout"]

        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        snapshots_service = vm_service.snapshots_service()

        correlation_id = str(uuid.uuid4())
        self.snapshot = snapshots_service.add(
            types.Snapshot(
                description='Snapshot 1',
                persist_memorystate=False
            ),
            query={'correlation_id': correlation_id},
        )

        try:
            self.wait_for_jobs(correlation_id, deadline)
        except JobError:
            self.snapshot = None
            raise

        log.info("Snapshot %s for vm %s created in %d seconds",
                 self.snapshot.id, self.vm.name, time.monotonic() - start)

    def remove_snapshot(self):
        log.info("Removing snapshot %s for vm %s",
                 self.snapshot.id, self.vm.name)
        start = time.monotonic()
        deadline = start + self.conf["remove_snapshot_timeout"]

        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        snapshots_service = vm_service.snapshots_service()
        snapshot_service = snapshots_service.snapshot_service(self.snapshot.id)

        correlation_id = str(uuid.uuid4())
        snapshot_service.remove(query={'correlation_id': correlation_id})
        self.wait_for_jobs(correlation_id, deadline)

        log.info("Snapshot %s for vm %s removed in %d seconds",
                 self.snapshot.id, self.vm.name, time.monotonic() - start)
        self.snapshot = None

    # Accessing guest.

    def write_data(self):
        """SSH into the guest and dirty every /dev/vd* disk with zeros.

        Best-effort: a missing address or a failed command is only logged.
        """
        try:
            vm_address = self.find_vm_address()
        except Timeout:
            log.warning("Timeout finding vm %s address, skipping write data",
                        self.vm.name)
            return

        log.info("Writing data in vm %s address %s", self.vm.name, vm_address)
        start = time.monotonic()

        script = b"""
for disk in /dev/vd*; do
    dd if=/dev/zero bs=1M count=%d \
        of=$disk oflag=direct conv=fsync &
done
wait
""" % self.conf["write_data_mb"]

        cmd = [
            "ssh",
            # Disable host key verification, we trust the vm we just created.
            "-o", "UserKnownHostsFile=/dev/null",
            "-o", "StrictHostKeyChecking=no",
            "-l", "root",
            vm_address,
            "bash", "-s",
        ]

        log.debug("Running command: %s", cmd)
        r = subprocess.run(
            cmd,
            input=script,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        if r.returncode != 0:
            log.warning("Command failed rc=%r out=%r err=%r",
                        r.returncode, r.stdout, r.stderr)
        else:
            log.debug("Command succeeded out=%r err=%r", r.stdout, r.stderr)

        log.info("Writing data completed in %d seconds",
                 time.monotonic() - start)

    def find_vm_address(self):
        """Poll the guest-reported NIC devices for the first IPv4 address."""
        start = time.monotonic()
        deadline = start + self.conf["vm_address_timeout"]
        vms_service = self.connection.system_service().vms_service()
        vm_service = vms_service.vm_service(self.vm.id)
        nics_service = vm_service.nics_service()

        while True:
            if time.monotonic() > deadline:
                raise Timeout(
                    "Timeout waiting for vm {} address".format(self.vm.name))
            for nic in nics_service.list():
                nic_service = nics_service.nic_service(nic.id)
                devices_service = nic_service.reported_devices_service()
                for device in devices_service.list():
                    device_service = devices_service.reported_device_service(
                        device.id)
                    device = device_service.get()
                    # Sometimes device.ips is None.
                    if device.ips:
                        for ip in device.ips:
                            if ip.version == types.IpVersion.V4:
                                log.info(
                                    "Found vm %s address in %d seconds",
                                    self.vm.name, time.monotonic() - start)
                                return ip.address
            time.sleep(self.conf["poll_interval"])

    # Polling jobs.

    def wait_for_jobs(self, correlation_id, deadline):
        log.info("Waiting for jobs with correlation id %s", correlation_id)
        while not self.jobs_completed(correlation_id):
            time.sleep(self.conf["poll_interval"])
            if time.monotonic() > deadline:
                raise Timeout(
                    "Timeout waiting for jobs with correlation id {}"
                    .format(correlation_id))

    def jobs_completed(self, correlation_id):
        """
        Return True if all jobs with specified correlation id have
        completed, False otherwise.

        Raise JobError if some jobs have failed or aborted.
        """
        jobs_service = self.connection.system_service().jobs_service()

        try:
            jobs = jobs_service.list(
                search="correlation_id={}".format(correlation_id))
        except sdk.Error as e:
            log.warning(
                "Error searching for jobs with correlation id %s: %s",
                correlation_id, e)
            # We dont know, assume that jobs did not complete yet.
            return False

        if all(job.status != types.JobStatus.STARTED for job in jobs):
            # In some cases, like create snapshot, it is not possible to
            # detect the failure by checking the entity.
            failed_jobs = [(job.description, str(job.status))
                           for job in jobs
                           if job.status != types.JobStatus.FINISHED]
            if failed_jobs:
                raise JobError(
                    "Some jobs for with correlation id {} have failed: {}"
                    .format(correlation_id, failed_jobs))
            return True
        else:
            jobs_status = [(job.description, str(job.status)) for job in jobs]
            log.debug("Some jobs with correlation id %s are running: %s",
                      correlation_id, jobs_status)
            return False


# Module-level driver: start one Runner thread per VM (staggered by
# conf["run_delay"]), wait for all of them, then report aggregate counts.
with open("conf.yml") as f:
    conf = yaml.safe_load(f)

logging.basicConfig(
    level=logging.DEBUG if conf["debug"] else logging.INFO,
    format="%(asctime)s %(levelname)-7s (%(threadName)s) %(message)s")

start = time.monotonic()
stats = Counter()
runners = []

for i in range(conf["vms_count"]):
    name = "run/{}".format(i)
    log.info("Starting runner %s", name)
    r = Runner(i, conf)
    t = threading.Thread(target=r.run, name=name, daemon=True)
    t.start()
    runners.append((r, t))
    log.info("Waiting %d seconds before starting next runner",
             conf["run_delay"])
    time.sleep(conf["run_delay"])

for r, t in runners:
    log.info("Waiting for runner %s", t.name)
    t.join()
    stats["passed"] += r.passed
    stats["failed"] += r.failed
    stats["errored"] += r.errored

log.info("%d failed, %d passed, %d errored in %d seconds",
         stats["failed"], stats["passed"], stats["errored"],
         time.monotonic() - start)
win_low_level_hook.py
class WinLowLevelHook:
    """Installs Windows low-level keyboard and mouse hooks.

    Each event is passed to the corresponding callback; when the callback
    returns a truthy value the event is forwarded to the next hook in the
    chain, otherwise it is swallowed (the handler returns 1).
    """

    def __init__(self, keyboard_callback, mouse_callback):
        import collections
        # keyboard_callback / mouse_callback receive (nCode, event) tuples;
        # their truthiness decides whether the event propagates (see start()).
        self.keyboard_callback = keyboard_callback
        self.mouse_callback = mouse_callback
        self.KeyboardEvent = collections.namedtuple(
            'KeyboardEvent',
            ['event_type', 'key_code', 'scan_code', 'alt_pressed', 'time'])
        # Hook handles from SetWindowsHookExA; needed for CallNextHookEx
        # and for unhooking at exit.
        self.hook_id_keyboard = None
        self.hook_id_mouse = None

    def start(self, asynchronous=True):
        """Install the hooks and run the message pump (never returns when
        asynchronous=False; otherwise pumps in a daemon-less worker thread)."""
        if asynchronous:
            print('async hook')
            import threading
            # Re-enter this method synchronously on a worker thread; the
            # message loop below would otherwise block the caller forever.
            threading.Thread(target=self.start,
                             kwargs={'asynchronous':False}).start()
            return
        print('starting hook')
        import win32con
        keyboard_event_types = {win32con.WM_KEYDOWN: 'key down',
                                win32con.WM_KEYUP: 'key up',
                                0x104: 'key down',  # WM_SYSKEYDOWN, used for Alt key.
                                0x105: 'key up',  # WM_SYSKEYUP, used for Alt key.
                                }
        mouse_event_types = {win32con.WM_RBUTTONDOWN: 'RMB down',
                             win32con.WM_RBUTTONUP: 'RMB up',
                             win32con.WM_MOUSEMOVE: 'mouse move'
                             }

        def low_level_keyboard_handler(nCode, wParam, lParam):
            # lParam is treated as an array: [0]=key code, [1]=scan code,
            # [3]=time. NOTE(review): "lParam[2] == 32" is used as the
            # alt_pressed flag — presumably bit 5 of the KBDLLHOOKSTRUCT
            # flags field; confirm against the Win32 docs.
            event = (nCode, self.KeyboardEvent(keyboard_event_types[wParam],
                                               lParam[0], lParam[1],
                                               lParam[2] == 32, lParam[3]))
            # Be a good neighbor and call the next hook.
            if self.keyboard_callback(event):
                return ctypes.windll.user32.CallNextHookEx(
                    self.hook_id_keyboard, nCode, wParam, lParam)
            else:
                # Callback vetoed the event: returning nonzero swallows it.
                print("#")
                return 1

        def low_level_mouse_handler(nCode, wParam, lParam):
            event = (nCode, mouse_event_types.get(wParam, 'unknown'))
            if self.mouse_callback(event):
                # Be a good neighbor and call the next hook.
                return ctypes.windll.user32.CallNextHookEx(
                    self.hook_id_mouse, nCode, wParam, lParam)
            else:
                return 1

        import ctypes
        # C prototype for both hook procedures:
        # int handler(int nCode, int wParam, void **lParam)
        CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int,
                                   ctypes.POINTER(ctypes.c_void_p))
        # Convert the Python handler into C pointer.
        pointer_keyboard = CMPFUNC(low_level_keyboard_handler)
        pointer_mouse = CMPFUNC(low_level_mouse_handler)

        import win32api
        # Hook both key up and key down events for common keys (non-system).
        self.hook_id_keyboard = ctypes.windll.user32.SetWindowsHookExA(
            win32con.WH_KEYBOARD_LL, pointer_keyboard,
            win32api.GetModuleHandle(None), 0)
        self.hook_id_mouse = ctypes.windll.user32.SetWindowsHookExA(
            win32con.WH_MOUSE_LL, pointer_mouse,
            win32api.GetModuleHandle(None), 0)

        # Register to remove the hook when the interpreter exits. Unfortunately a
        # try/finally block doesn't seem to work here.
        import atexit
        atexit.register(ctypes.windll.user32.UnhookWindowsHookEx,
                        self.hook_id_keyboard)
        atexit.register(ctypes.windll.user32.UnhookWindowsHookEx,
                        self.hook_id_mouse)

        # Message pump: low-level hooks only fire while this thread is
        # pumping messages. This loop runs until the process exits.
        while True:
            import win32gui
            #peek_result, msg = win32gui.PeekMessage(None, 0, 0, 1)
            result, msg = win32gui.GetMessage(None, 0, 0)
            print('got msg:', msg)
            win32gui.TranslateMessage(msg)
            #print('translated')
            win32gui.DispatchMessage(msg)
            #print('sent')
            #sleep(0.5)
            pass
interaction.py
from typing import Optional, Any
import asyncio
import contextlib
import enum
import pathlib
import queue
import shlex
import threading

from wcpan.drive.core.types import Node
from wcpan.drive.core.drive import DriveFactory, Drive

from .util import print_as_yaml


class TokenType(enum.Enum):
    # Completion context: completing a command name vs. a path argument.
    Global = enum.auto()
    Path = enum.auto()


class ShellContext(object):
    """Interactive shell state: current directory, command dispatch,
    readline completion."""

    def __init__(self, drive: 'DriveProxy', home_node: Node) -> None:
        self._drive = drive
        self._home = home_node
        self._cwd = home_node
        # Command name -> handler; also drives completion of command names.
        self._actions = {
            'help': self._help,
            'ls': self._list,
            'cd': self._chdir,
            'mkdir': self._mkdir,
            'sync': self._sync,
            'pwd': self._pwd,
            'find': self._find,
            'info': self._info,
            'hash': self._hash,
            'id_to_path': self._id_to_path,
            'path_to_id': self._path_to_id,
        }
        self._cache = ChildrenCache(drive)

    def get_prompt(self) -> str:
        # The drive root has an empty name; show it as "/".
        if not self._cwd.name:
            name = '/'
        else:
            name = self._cwd.name
        return f'{name} > '

    def complete(self, text: str, state: int) -> Optional[str]:
        """readline completer: command names for the first token, child
        names for path tokens."""
        import readline
        whole_text = readline.get_line_buffer()
        end_index = readline.get_endidx()
        type_, token = parse_completion(whole_text, end_index)
        if type_ == TokenType.Global:
            values = self._get_global(text)
        elif type_ == TokenType.Path:
            values = self._get_path(text, token)
        else:
            return None
        try:
            return values[state]
        except IndexError:
            return None

    def execute(self, line: str) -> None:
        """Parse one input line and dispatch to the matching action."""
        cmd = shlex.split(line)
        if not cmd:
            return
        command = cmd[0]
        if command not in self._actions:
            print(f'unknown command {command}')
            return
        action = self._actions[command]
        try:
            action(*cmd[1:])
        except TypeError as e:
            # Wrong number of arguments for the action.
            print(e)

    def _get_global(self, prefix: str) -> list[str]:
        cmd = self._actions.keys()
        cmd = [c for c in cmd if c.startswith(prefix)]
        return cmd

    def _get_path(self, prefix: str, path: str) -> list[str]:
        children = self._cache.get(self._cwd, path)
        children = [c for c in children if c.startswith(prefix)]
        return children

    def _help(self) -> None:
        cmd = self._actions.keys()
        for c in cmd:
            print(c)

    # FIX: annotation was ``src: str = None``; None is a valid default,
    # so the parameter is Optional.
    def _list(self, src: Optional[str] = None) -> None:
        if not src:
            node = self._cwd
        else:
            path = normalize_path(self._drive, self._cwd, src)
            node = self._drive.get_node_by_path(path)
            if not node:
                print(f'{src} not found')
                return
        children = self._drive.get_children(node)
        for child in children:
            print(child.name)

    def _chdir(self, src: Optional[str] = None) -> None:
        # Without an argument, behave like ``cd`` and jump to home.
        if not src:
            self._cwd = self._home
            return
        path = normalize_path(self._drive, self._cwd, src)
        node = self._drive.get_node_by_path(path)
        if not node:
            print(f'unknown path {src}')
            return
        if not node.is_folder:
            print(f'{src} is not a folder')
            return
        self._cwd = node

    def _mkdir(self, src: str) -> None:
        if not src:
            print(f'invalid name')
            return
        self._drive.create_folder(self._cwd, src)

    def _sync(self) -> None:
        # The cache may hold stale children after a sync.
        self._cache.reset()
        self._drive.sync()

    def _pwd(self) -> None:
        print(self._drive.get_path(self._cwd))

    def _find(self, src: str) -> None:
        rv = self._drive.search_by_regex(src)
        for [id_, path] in rv:
            print(f'{id_} - {path}')

    def _info(self, src: str) -> None:
        node = self._drive.get_node_by_id(src)
        if not node:
            print('null')
        else:
            print_as_yaml(node.to_dict())

    def _hash(self, *args) -> None:
        rv = self._drive.get_hash_list(self._cwd, args)
        for [path_or_id, hash_] in rv:
            print(f'{hash_} - {path_or_id}')

    def _id_to_path(self, src: str) -> None:
        node = self._drive.get_node_by_id(src)
        if not node:
            print(f'{src} not found')
            return
        path = self._drive.get_path(node)
        print(path)

    def _path_to_id(self, src: str) -> None:
        path = normalize_path(self._drive, self._cwd, src)
        node = self._drive.get_node_by_path(path)
        if not node:
            print(f'{src} not found')
            return
        print(node.id_)


class ChildrenCache(object):
    """Memoizes child-name listings for path completion, keyed by
    (cwd id, typed path)."""

    def __init__(self, drive: 'DriveProxy') -> None:
        self._drive = drive
        self._cache = {}

    def get(self, cwd: Node, src: str) -> list[str]:
        key = f'{cwd.id_}:{src}'
        if key in self._cache:
            return self._cache[key]
        path = normalize_path(self._drive, cwd, src)
        node = self._drive.get_node_by_path(path)
        if not node:
            # The typed path is a partial name: complete against its parent.
            parent_path = path.parent
            node = self._drive.get_node_by_path(parent_path)
        children = self._drive.get_children(node)
        self._cache[key] = [child.name for child in children]
        return self._cache[key]

    def reset(self) -> None:
        self._cache = {}


class DriveProxy(object):
    """Runs an async Drive on a worker thread and exposes a blocking API.

    Each public method enqueues an OffMainThreadTask, blocks on its
    condition variable, and returns the task's return_value once the
    worker thread has run the matching coroutine.
    """

    def __init__(self, factory: DriveFactory) -> None:
        self._factory = factory
        self._thread = threading.Thread(target=self._main)
        self._queue = queue.Queue()
        self._actions = {
            'sync': self._sync,
            'get_node_by_path': self._get_node_by_path,
            'get_path': self._get_path,
            'get_children': self._get_children,
            'search_by_regex': self._search_by_regex,
            'get_node_by_id': self._get_node_by_id,
            'get_hash_list': self._get_hash_list,
            'create_folder': self._create_folder,
        }

    def __enter__(self) -> 'DriveProxy':
        self._thread.start()
        return self

    # FIX: annotated ``-> bool`` but nothing is returned.
    def __exit__(self, et, ev, tb) -> None:
        # A None task tells the worker loop to stop.
        self._queue.put(None)
        self._queue.join()
        self._thread.join()

    def _main(self) -> None:
        assert_off_main_thread()
        asyncio.run(self._amain())

    async def _amain(self) -> None:
        """Worker loop: pop tasks, run the matching coroutine, wake the
        caller even when the task raised."""
        assert_off_main_thread()
        async with self._factory() as drive:
            while True:
                task = self._queue.get()
                try:
                    if not task:
                        break
                    if task.action not in self._actions:
                        print(f'unknown action {task.action}')
                        return
                    action = self._actions[task.action]
                    await action(drive, task)
                except Exception as e:
                    print(e)
                finally:
                    if task:
                        with task as cv:
                            cv.notify()
                    self._queue.task_done()

    def sync(self) -> None:
        task = OffMainThreadTask(
            action='sync',
            args=(),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()

    async def _sync(self, drive: Drive, task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        async for change in drive.sync():
            print(change)
        task.return_value = None

    def get_node_by_path(self, path: pathlib.PurePath) -> Optional[Node]:
        task = OffMainThreadTask(
            action='get_node_by_path',
            args=(path,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _get_node_by_path(self, drive: Drive,
                                task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        rv = await drive.get_node_by_path(*task.args, **task.kwargs)
        task.return_value = rv

    def get_path(self, node: Node) -> pathlib.PurePath:
        task = OffMainThreadTask(
            action='get_path',
            args=(node,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _get_path(self, drive: Drive,
                        task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        rv = await drive.get_path(*task.args, **task.kwargs)
        task.return_value = rv

    def get_children(self, node: Node) -> list[Node]:
        task = OffMainThreadTask(
            action='get_children',
            args=(node,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _get_children(self, drive: Drive,
                            task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        rv = await drive.get_children(*task.args, **task.kwargs)
        task.return_value = rv

    def search_by_regex(self, pattern: str) -> list[tuple[str, str]]:
        task = OffMainThreadTask(
            action='search_by_regex',
            args=(pattern,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _search_by_regex(self, drive: Drive,
                               task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        node_list = await drive.find_nodes_by_regex(*task.args, **task.kwargs)
        # Resolve all matched paths concurrently.
        path_list = [drive.get_path(node) for node in node_list]
        path_list = await asyncio.gather(*path_list)
        id_list = [node.id_ for node in node_list]
        rv = zip(id_list, path_list)
        task.return_value = list(rv)

    def get_node_by_id(self, id_: str) -> Optional[Node]:
        task = OffMainThreadTask(
            action='get_node_by_id',
            args=(id_,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _get_node_by_id(self, drive: Drive,
                              task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        rv = await drive.get_node_by_id(*task.args, **task.kwargs)
        task.return_value = rv

    def get_hash_list(self, cwd: Node,
                      path_or_id_list: list[str]) -> list[tuple[str, str]]:
        task = OffMainThreadTask(
            action='get_hash_list',
            args=(cwd, path_or_id_list,),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()
        return task.return_value

    async def _get_hash_list(self, drive: Drive,
                             task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        cwd = task.args[0]
        path_or_id_list = task.args[1]
        base_path = await drive.get_path(cwd)
        node_list = [
            get_node_by_path_or_id(drive, base_path, path_or_id)
            for path_or_id in path_or_id_list
        ]
        node_list = await asyncio.gather(*node_list)
        hash_list = [node.hash_ for node in node_list]
        rv = zip(path_or_id_list, hash_list)
        task.return_value = list(rv)

    def create_folder(self, node: Node, name: str) -> None:
        task = OffMainThreadTask(
            action='create_folder',
            args=(node, name),
            kwargs={},
        )
        self._queue.put(task)
        with task as cv:
            cv.wait()

    async def _create_folder(self, drive: Drive,
                             task: 'OffMainThreadTask') -> None:
        assert_off_main_thread()
        rv = await drive.create_folder(*task.args, **task.kwargs)
        task.return_value = rv


class OffMainThreadTask(object):
    """One queued drive call: action name, arguments, a condition variable
    the caller waits on, and a slot for the result."""

    # BUG FIX: the original signature read ``kwargs=dict[str, Any]`` — a
    # *default value* of the generic-alias object where the annotation
    # ``kwargs: dict[str, Any]`` was intended. Omitting kwargs would have
    # crashed later on ``**task.kwargs``. A proper optional default is
    # used instead (backward compatible: all callers pass kwargs).
    def __init__(self, action: str, args: tuple[Any],
                 kwargs: Optional[dict[str, Any]] = None) -> None:
        self._action = action
        self._args = args
        self._kwargs = {} if kwargs is None else kwargs
        self._done = threading.Condition()
        self._raii = None
        self.return_value = None

    def __enter__(self) -> threading.Condition:
        # Acquire the condition's lock; keep it held via an ExitStack so
        # __exit__ can release it.
        with contextlib.ExitStack() as stack:
            stack.enter_context(self._done)
            self._raii = stack.pop_all()
        return self._done

    # FIX: annotated ``-> bool`` but nothing is returned.
    def __exit__(self, et, ev, tb) -> None:
        self._raii.close()

    @property
    def action(self) -> str:
        return self._action

    @property
    def args(self) -> tuple[Any]:
        return self._args

    @property
    def kwargs(self) -> dict[str, Any]:
        return self._kwargs


def interact(factory: DriveFactory, home_node: Node) -> None:
    """Run the interactive shell loop until EOF (Ctrl-D)."""
    with DriveProxy(factory) as drive:
        context = ShellContext(drive, home_node)

        import readline
        readline.set_completer_delims('/ ')
        readline.set_completer(context.complete)
        readline.parse_and_bind('tab: complete')

        while True:
            prompt = context.get_prompt()
            try:
                line = input(prompt)
            except EOFError:
                break
            context.execute(line)

    # reset anchor
    print()


def resolve_path(
    from_: pathlib.PurePath,
    to: pathlib.PurePath,
) -> pathlib.PurePath:
    """Resolve *to* relative to *from_*, honoring '.' and '..' parts."""
    rv = from_
    for part in to.parts:
        if part == '.':
            continue
        elif part == '..':
            rv = rv.parent
        else:
            rv = rv / part
    return rv


def normalize_path(
    drive: DriveProxy,
    node: Node,
    string: str,
) -> pathlib.PurePath:
    """Turn user input into an absolute PurePath, relative to *node*."""
    path = pathlib.PurePath(string)
    if not path.is_absolute():
        current_path = drive.get_path(node)
        path = resolve_path(current_path, path)
    return path


def parse_completion(whole_text, end_index):
    """Tokenize the readline buffer and classify the token at *end_index*.

    Returns (TokenType.Global, token) for the command-name position and
    (TokenType.Path, token) for argument positions.
    """
    lexer = shlex.shlex(whole_text, posix=True)
    lexer.whitespace_split = True
    cmd = []
    offset = 0
    while True:
        try:
            token = lexer.get_token()
        except ValueError:
            # Unterminated quote: shlex keeps the partial token in
            # lexer.token. BUG FIX: the original searched for ``token``
            # here, which is unbound when the very first token is the
            # partial one (NameError).
            token = lexer.token
            idx = whole_text.find(token, offset)
            assert idx >= 0
            offset = idx + len(token)
            cmd.append((idx, token))
            break
        if token == lexer.eof:
            break
        idx = whole_text.find(token, offset)
        assert idx >= 0
        offset = idx + len(token)
        cmd.append((idx, token))

    # Walk tokens from the end to find the one the cursor is inside.
    idx = None
    token = None
    token_list = [(idx, offset, token)
                  for idx, (offset, token) in enumerate(cmd)]
    for idx, offset, token in reversed(token_list):
        if offset <= end_index:
            break
    if idx == 0:
        return TokenType.Global, token
    else:
        return TokenType.Path, token


def assert_off_main_thread():
    assert threading.current_thread() is not threading.main_thread()


async def get_node_by_path_or_id(
    drive: Drive,
    cwd: pathlib.PurePath,
    path_or_id: str,
) -> Node:
    """Look up a node by id first, then by absolute or cwd-relative path."""
    node = await drive.get_node_by_id(path_or_id)
    if node:
        return node
    path = pathlib.PurePath(path_or_id)
    if path.is_absolute():
        node = await drive.get_node_by_path(path)
        return node
    path = resolve_path(cwd, path)
    node = await drive.get_node_by_path(path)
    return node
CompMolNWChem_ThermoServer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# KBase SDK auto-generated JSON-RPC server scaffold for the
# CompMolNWChem_Thermo module: WSGI app, RPC dispatch, auth and logging.
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server

import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
    JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError

from biokbase import log
from CompMolNWChem_Thermo.authclient import KBaseAuth as _KBaseAuth

try:
    from ConfigParser import ConfigParser  # py2
except ImportError:
    from configparser import ConfigParser  # py3

# Environment variable names / config key used below.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'

# Note that the error fields do not match the 2.0 JSONRPC spec


def get_config_file():
    """Path of the deployment config file from $KB_DEPLOYMENT_CONFIG, or None."""
    return environ.get(DEPLOY, None)


def get_service_name():
    """Service/section name from $KB_SERVICE_NAME, or None."""
    return environ.get(SERVICE, None)


def get_config():
    """Read this service's config section into a plain dict (None if unset)."""
    if not get_config_file():
        return None
    retconfig = {}
    config = ConfigParser()
    config.read(get_config_file())
    for nameval in config.items(get_service_name() or 'CompMolNWChem_Thermo'):
        retconfig[nameval[0]] = nameval[1]
    return retconfig

config = get_config()

# Import and instantiate the implementation after config is loaded, since
# the Impl constructor takes the config.
from CompMolNWChem_Thermo.CompMolNWChem_ThermoImpl import CompMolNWChem_Thermo  # noqa @IgnorePep8
impl_CompMolNWChem_Thermo = CompMolNWChem_Thermo(config)


class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that handles sets, frozensets and toJSONable() objects."""

    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, frozenset):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        return json.JSONEncoder.default(self, obj)


class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that threads a MethodContext into every call."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)

        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # The "- 1" accounts for ctx, which is injected by the server.
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method: wrap it in a server
            # error carrying the traceback so the client sees the cause.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()

        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None

            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications (id is None).
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond


class MethodContext(dict):
    """Per-call context dict (client ip, auth, provenance) plus logging helpers."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        # level may be a symbolic debug level or an int 1..3 (mapped to 7..9).
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Fetch provenance from the callback server if one is configured,
        otherwise return whatever was stored in this context."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')


class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        self.data = data or error or ''
        # data = JSON RPC 2.0, error = 1.1

    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data


def getIPAddress(environ):
    """Best-effort client IP: X-Forwarded-For / X-Real-IP (unless the config
    says not to trust them), falling back to REMOTE_ADDR."""
    xFF = environ.get('HTTP_X_FORWARDED_FOR')
    realIP = environ.get('HTTP_X_REAL_IP')
    trustXHeaders = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'

    if (trustXHeaders):
        if (xFF):
            # first hop in the X-Forwarded-For chain is the original client
            return xFF.split(',')[0].strip()
        if (realIP):
            return realIP.strip()
    return environ.get('REMOTE_ADDR')


class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'CompMolNWChem_Thermo'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Map of RPC method name -> 'required'/'optional'/'none' auth policy.
        self.method_authentication = dict()
        self.rpc_service.add(impl_CompMolNWChem_Thermo.run_CompMolNWChem_Thermo,
                             name='CompMolNWChem_Thermo.run_CompMolNWChem_Thermo',
                             types=[dict])
        self.method_authentication['CompMolNWChem_Thermo.run_CompMolNWChem_Thermo'] = 'required'  # noqa
        self.rpc_service.add(impl_CompMolNWChem_Thermo.status,
                             name='CompMolNWChem_Thermo.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers (CORS preflight)
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'CompMolNWChem_Thermo ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # optional auth: a bad token is ignored;
                                # required auth: surface the failure
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Attach id/version info and the traceback to an error dict and
        serialize it, logging the trace if present."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])

        if 'id' in request:
            error['id'] = request['id']

        # Place the trace under the field expected by the request's
        # JSON-RPC version (1.1 'error', 2.0 'data', else assume 1.0).
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace

        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)


application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle of the child server process when start_server(newprocess=True).
_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port 0 means "pick a free port"; read back the actual one
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port


def stop_server():
    # Only valid after start_server(newprocess=True); terminates the child.
    global _proc
    _proc.terminate()
    _proc = None


def process_async_cli(input_file_path, output_file_path, token):
    """Run one JSON-RPC request read from a file and write the response to a
    file; returns 0 on success, 500 if the response carries an error."""
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code


if __name__ == "__main__":
    # Async-CLI mode: `script input.json output.json [token|tokenfile]`
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: `script [--port N] [--host H]`
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
solve.py
#!/usr/bin/env python3
# CTF solver for the hts_bus_challenge service: watches the bus poll output,
# derives the EPS/EEP I2C ids, powers everything but the EEPROM off, then
# clocks out and prints the EEPROM contents to find the flag.
import os,sys
from threading import Thread
from queue import Queue, Empty
import time
import socket, errno
import re

ON_POSIX = 'posix' in sys.builtin_module_names

done=False  # set by the reader thread when the socket closes
tstart = time.time()

print('\n*** SOLVING: hts_bus_challenge -')

# as we get chunks of output bytes from the challenge, we convert to
# strings and enqueue them for the mainloop.
# if there is a >1 sec pause in the output, we enqueue a single empty
# bytes object (b"") as a sentinel so the mainloop knows we're in a pause
# (everything else on the queue is a decoded str, so the type distinguishes).
def enqueue_output(out, queue):
    global done, tstart
    #fd = out.fileno()
    #fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    #fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    didpause=False
    ts=time.time()-tstart  # time of last received data
    while True:
        try:
            stuff = out.recv(64)
        except socket.error as e:
            # non-blocking socket: EAGAIN just means "no data yet"
            if e.errno != errno.EAGAIN:
                raise e
            pts=time.time()-tstart
            if (pts-ts > 1.0) and not didpause:
                # >1s of silence: enqueue the pause sentinel once
                queue.put(b"")
                didpause=True
            time.sleep(0.05)
        else:
            didpause=False
            if stuff == b'':
                # peer closed the connection
                break
            ts=time.time()-tstart
            queue.put(stuff.decode("utf-8"))
    out.close()
    done=True

if __name__ == "__main__":
    '''
    try:
        p = Popen(['bin/hts_bus_challenge'], stdout=PIPE, stdin=PIPE, bufsize=1, close_fds=ON_POSIX)
    except Exception as e:
        print("FAIL: Couldn't Popen bin/hts_bus_challenge : {}".format(e))
        exit(-1)
    '''
    Host = os.getenv("HOST", "localhost")
    Port = int(os.getenv("PORT", 31360))
    Ticket = os.getenv("TICKET","")

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((Host, Port))
    if len(Ticket):
        # challenge infra asks for a ticket before starting
        sock.recv(128)
        sock.send( (Ticket + "\n").encode("utf-8") )
    sock.setblocking(0)

    q = Queue()
    t = Thread(target=enqueue_output, args=(sock, q))
    t.daemon = True # thread dies with the program
    t.start()

    machine_state=0
    # initially, we just want to gather the text of a full poll of the bus -
    # up to the first pause
    pollstring=""
    eps_id=0
    eep_enable_bit = 0
    eep_id = 0
    eep_string=""  # eep dump gathers here
    found_needle=False
    oct_buf = ''
    eep_text=b''  # NOTE(review): never used below
    exfil = ''

    while not done:
        try:
            ch = q.get(timeout=.1) #q.get_nowait() # or q.get(timeout=.1)
            # NOTE(review): `ch is b""` relies on CPython caching the empty
            # bytes object so the literal here is the same object as the
            # sentinel put by the reader thread; `isinstance(ch, bytes)`
            # would be the robust test.
            if machine_state==0:
                print(ch,end='',flush=True)
                if ch is b"":
                    # OBC finished initial round of bus polling and we're in a pause.
                    # parse the poll string
                    s=pollstring.find('^')
                    pollstring=pollstring[s:]
                    # ^3c+00+00+3e+00+00+00+
                    eps_id = int(pollstring[1:3],16)
                    eps_flags = int(pollstring[10:12],16)
                    # flip every power-enable flag except the EEP's
                    eep_enable_bit = 0x3f-eps_flags
                    toks = pollstring.split('^')
                    eep_id = int(toks[4][0:2],16)
                    output="^{:02x}0000{:02x}.^{:02x}0000".format(eps_id+1,eep_enable_bit,eep_id)
                    output = output.encode('utf-8')
                    print('\r\n\r\n* WISDOMS GAINED:\r\n',end='')
                    print(' EPS I2C ID : 0x{:02x}\r\n'.format(eps_id),end='')
                    print(' EPS EEP ENABLE BIT : 0x{:02x}\r\n'.format(eep_enable_bit),end='')
                    print(' EEP I2C ID : 0x{:02x}\r\n'.format(eep_id),end='')
                    print(' REQUIRED COMMANDS : {}\r\n\r\n'.format(output),end='')
                    # issue the I2C transactions to power off everything but the EEP,
                    # and to start dumping the EEP.
                    sock.sendall(output)
                    needle='^{:02x}+00+00+'.format(eep_id)
                    machine_state=1
                else:
                    pollstring+=ch
            elif machine_state==1:
                # we're dumping the EEP
                if ch is not b"":
                    eep_string+=ch
                if not found_needle:
                    # skip everything up to the EEP read-response header
                    if needle in eep_string:
                        i = eep_string.find(needle)
                        eep_string=eep_string[i+len(needle):]
                        found_needle=True
                        print("* EEP DUMP:\r\n\r\n",end='')
                else:
                    if len(eep_string)>=1:
                        # decode pairs of hex digits into printable chars
                        for c in eep_string:
                            if c in '0123456789abcdefABCDEF':
                                oct_buf+=c
                                if len(oct_buf)==2:
                                    o=int(oct_buf,16)
                                    if (o>=32) and (o<=126):
                                        print("{:c}".format(o),end='',flush=True);
                                        exfil += "{:c}".format(o)
                                    else:
                                        print(".",end='',flush=True)
                                        exfil += "."
                                    oct_buf=''
                        eep_string=''
                        if '-' in ch:
                            # '-' marks the end of the dump
                            break
                    else:
                        # pause with nothing buffered: poke the bus for more
                        sock.send(b'00')
        except Empty:
            pass

    print('\r\n\r\n')
    m = re.match('.*(flag{.+?}).*', exfil)
    if m is not None:
        print(m.group(1))
    else:
        print("Didn't find flag")

    print('\r\n\r\nDONE.\r\n\r\n')
backend.py
""" Backend Implementation. Where the back end is implemented. """ from os.path import exists, isfile from threading import Thread from time import time_ns, sleep from typing import Optional from secrets import token_bytes from _sha256 import sha256 from exceptions import InvalidCredentials from exceptions import InvalidAccountFormat from exceptions import AccountExists, AccountNotFound from exceptions import InvalidUsername from exceptions import TooManyAttempts from exceptions import SessionOngoing, NoSession from globaldata import ACCOUNTS_DIRPATH, sep from globaldata import __author__, __version__, __license__ from utils import AESEncrypt from utils import bytes_to_num, num_to_bytes, bytes_fixlen from utils import deletefile __author__ = __author__ __version__ = __version__ __license__ = __license__ class Entry: """ Entry class. Each entry is represented as an Entry object. :param title: The title of the entry. :param entry: The entry text. :param entry_num: The entry number. """ @staticmethod def loads(raw_data: bytes): """ Load Entry from a sequence of bytes. :param raw_data: The raw bytes data to be extracted. :return: Entry instance loaded from raw bytes data. """ r = 0 title_len = bytes_to_num(raw_data[r: r+1]) r += 1 title = raw_data[r: r+title_len].decode('utf-8') r += title_len entry_num = bytes_to_num(raw_data[r: r+4]) r += 4 time_created = bytes_to_num(raw_data[r: r+8]) r += 8 time_last_update = bytes_to_num(raw_data[r: r+8]) r += 8 entry = raw_data[r:].decode('utf-8') dummy_entry = Entry(None, None, None) dummy_entry.title = title dummy_entry.entry = entry dummy_entry.entry_num = entry_num dummy_entry.time_created = time_created dummy_entry.time_last_update = time_last_update return dummy_entry def dumps(self) -> bytes: """ Dump Entry as a sequence of bytes. :return: Entry instance compressed into raw bytes. 
""" # Next 1 byte title_len = num_to_bytes(len(self.title)) title_len = bytes_fixlen(title_len, 1) # Next N bytes title = self.title.encode('utf-8') # Next 4 bytes entry_num = bytes_fixlen(num_to_bytes(self.entry_num), 4) # Next 8 bytes time_created = num_to_bytes(self.time_created) time_created = bytes_fixlen(time_created, 8) # Next 8 bytes time_last_update = num_to_bytes(self.time_last_update) time_last_update = bytes_fixlen(time_last_update, 8) # Remaining bytes entry = self.entry.encode('utf-8') data = b''.join([title_len, title, entry_num, time_created, time_last_update, entry]) return data def __init__(self, title: str or None, entry: str or None, entry_num: int or None): self.title = title self.entry = entry self.entry_num = entry_num self.time_created = time_ns() self.time_last_update = time_ns() def update(self, title: str, entry: str): """ Update the entry. :param title: The title of the entry. :param entry: The entry text. """ self.title = title self.entry = entry self.time_last_update = time_ns() class Account: """ DevLogs Account class. :param username: The account username. :param raw_password: The account password. :param overwrite: Option to overwrite file data of another account if it already exists. """ # The 256-bit pepper for the encrypted data # Note: Useless if application isn't compiled into a binary PEPPER = (b'\x8e\xa5H\r`\x1dD\xdam\xa0\xb2[?\x8e\xb0\xfe\xb6\xa6\xa5J' b'\xb6l\xc1P)\xdb\xda\x04|=RB') FILTEREDCHARS = '\\/:*?"<>|' + '%' @staticmethod def sanitize_fname(fname: str) -> str: """ Sanitize the filename characters. :param fname: The filename. :return: The sanitized filename. """ return ''.join(map( lambda c: f'%{hex(ord(c))[2:].upper()}' if c in Account.FILTEREDCHARS else c, fname )) @staticmethod def checks_auth(encrypted_data: bytes, raw_password: str) -> bytes or None: """ Check if credentials are valid by checking encrypted_data. :param encrypted_data: The encrypted file data loaded directly from file. 
:param raw_password: The raw password to be used for decryption. :return: Decrypted account data bytes if password is valid or None if not. """ if raw_password is None: return None if encrypted_data is None or len(encrypted_data) == 0: return '' # Invalid format salt = encrypted_data[:32] password = sha256(Account.PEPPER + raw_password.encode('utf-8') + salt) del raw_password # Remove raw_password data from RAM ASAP raw_data = AESEncrypt(password.digest()).decrypt(encrypted_data[32:]) return raw_data @staticmethod def check_auth(username: str, raw_password: str) -> bytes or None: """ Check if credentials are valid by checking account file. :param username: The username to be used for finding account file. :param raw_password: The raw password to be used for decryption. :return: Decrypted account data bytes if password is valid or None if not. """ username = Account.sanitize_fname(username) filepath = f'{ACCOUNTS_DIRPATH}{sep}{username}.account' try: with open(filepath, 'rb') as file: return Account.checks_auth(file.read(), raw_password) except FileNotFoundError: return None @staticmethod def create_account(username: str, raw_password: str): """ Create a new account. Basically sign up. :param username: The account username. :param raw_password: The account password. """ Account(username, raw_password).dump() # Save change in credentials @staticmethod def delete_account(username: str): """ Delete an account, by deleting its file. :param username: The username to be used for finding account file. """ username = Account.sanitize_fname(username) filepath = f'{ACCOUNTS_DIRPATH}{sep}{username}.account' deletefile(filepath) @staticmethod def loads(encrypted_data: bytes, raw_password: str): """ Load Account from an encrypted sequence of bytes. :param encrypted_data: The encrypted data to be imported. :param raw_password: The raw password to be used for decryption. :return: Account instance loaded from encrypted bytes data. 
""" raw_data = Account.checks_auth(encrypted_data, raw_password) if raw_data is None: raise InvalidCredentials("Wrong credentials were provided") elif len(raw_data) < 32+32+8+2: raise InvalidAccountFormat salt = token_bytes(32) # Change salt everytime password = sha256(Account.PEPPER + raw_password.encode('utf-8') + salt) del raw_password # Remove raw_password data from RAM ASAP try: r = 0 username = raw_data[r: r+32].decode('utf-8').strip('\x00') r += 32 r += 32 time_created = bytes_to_num(raw_data[r: r+8]) r += 8 num_entries = bytes_to_num(raw_data[r: r+2]) r += 2 entries = [] for entry in range(num_entries): entry_size = bytes_to_num(raw_data[r: r+3]) r += 3 entries.append(Entry.loads(raw_data[r: r+entry_size])) r += entry_size except IndexError: raise InvalidAccountFormat except UnicodeDecodeError: raise InvalidAccountFormat account = Account(None, None) account.username = username account.salt = salt account.password = password.digest() account.time_created = time_created account.entries = entries return account @staticmethod def load(username: str, raw_password: str): """ Load Account from a account file. :param username: The username to be used for finding account file. :param raw_password: The raw password to be used for decryption. :return: Account instance loaded from file. 
""" username = Account.sanitize_fname(username) filepath = f'{ACCOUNTS_DIRPATH}{sep}{username}.account' try: with open(filepath, 'rb') as file: return Account.loads(file.read(), raw_password) except FileNotFoundError: raise AccountNotFound def dumps(self): """Dump Account as an encrypted sequence of bytes.""" # Next 32 bytes username = self.username.encode('utf-8') username = bytes_fixlen(username, 32) # Next 32 bytes password = self.password # Next 8 bytes time_created = num_to_bytes(self.time_created) time_created = bytes_fixlen(time_created, 8) # Next 2 bytes num_entries = num_to_bytes(len(self.entries)) num_entries = bytes_fixlen(num_entries, 2) # Final N of (3 + M[n]) bytes entries = list(map(lambda e: e.dumps(), self.entries)) entry_sizes = list(map(lambda raw_e: num_to_bytes(len(raw_e)), entries)) entry_sizes = list(map(lambda e_size: bytes_fixlen(e_size, 3), entry_sizes)) entries_data = b''.join([entry_sizes[i] + entries[i] for i in range(len(entries))]) raw_data = b''.join([username, password, time_created, num_entries, entries_data]) # Next 32 bytes salt = self.salt encrypted_data = b''.join([ salt, AESEncrypt(password).encrypt(raw_data) ]) return encrypted_data def dump(self): """Dump Account into a account file.""" username = Account.sanitize_fname(self.username) filepath = f'{ACCOUNTS_DIRPATH}{sep}{username}.account' with open(filepath, 'bw') as file: file.write(self.dumps()) def __init__(self, username: str or None, raw_password: str or None, overwrite: Optional[bool] = False): if username is raw_password is not None: if len(username.encode('utf-8')) > 32: raise InvalidUsername('Username is too long (>32)') salt = token_bytes(32) password = sha256(Account.PEPPER + raw_password.encode('utf-8') + salt) _username = Account.sanitize_fname(username) filepath = f'{ACCOUNTS_DIRPATH}{sep}{_username}.account' if not overwrite: acc_exists = exists(filepath) and isfile(filepath) check_auth = Account.check_auth(username, raw_password) if acc_exists: if check_auth 
is None: raise InvalidAccountFormat else: raise AccountExists self.username = username self.password = password.digest() self.salt = salt self.time_created = time_ns() self.entries = [] def add_entry(self, title: str, entry: str): """ Add an entry. :param title: The title of the entry. :param entry: The entry text. """ self.entries.append(Entry(title, entry, len(self.entries))) def del_entry(self, entry: Entry): """ Delete an entry. :param entry: The Entry instance. """ index = self.entries.index(entry) self.entries.pop(index) for e in range(index, len(self.entries)): self.entries[e].entry_num -= 1 def move_entry_up(self, entry: Entry): """ Move entry up. :param entry: The Entry instance. """ index = self.entries.index(entry) if index > 0: entry.entry_num -= 1 self.entries.insert(index-1, self.entries.pop(index)) self.entries[index].entry_num += 1 def move_entry_down(self, entry: Entry): """ Move entry up. :param entry: The Entry instance. """ index = self.entries.index(entry) if index < len(self.entries) - 1: entry.entry_num += 1 self.entries.insert(index+1, self.entries.pop(index)) self.entries[index].entry_num -= 1 class Session: """ DevLogs Session class. Class for Account sessions. :param account: The Account instance to create a session of. """ def __init__(self, account): self.account = account self.time_created = time_ns() def __del__(self): """When session is terminated.""" self.account.dump() def lock_account(self): """Prevent any changes to account object and file.""" NotImplemented def add_new_entry(self, title: str, entry: str): """ Add new entry to account entries. :param title: The title of the entry. :param entry: The entry text. """ self.account.add_entry(title, entry) class SessionManager: """ DevLogs Session Manager class. Manages Sessions. 
""" MAX_FAILED_ATTEMPTS = 5 INIT_HARDLOCK_DELAY = 20 HARDLOCK_DELAY_INCR = 10 def __init__(self): self.current_session = None # Lock when too many attempts are made self.session_hardlock = False # Current amount of failed attempts self.failed_attempts = 0 # Current amount of seconds delay for hardlock self.hardlock_delay = SessionManager.INIT_HARDLOCK_DELAY def hardlock(self): """Prevent any attempts from starting a session.""" def override(): orig_create_session = self.create_session def new_create_session(username: str, raw_password: str): """ Alternate function if hardlocked. :param username: The account username. :param raw_password: The account password. """ raise TooManyAttempts self.create_session = new_create_session self.session_hardlock = True sleep(self.hardlock_delay) self.hardlock_delay += SessionManager.HARDLOCK_DELAY_INCR self.create_session = orig_create_session self.session_hardlock = False Thread(target=override, daemon=True).start() def create_session(self, username: str, raw_password: str): """ Attempt to create a new session. Basically the equivalent of Login. :param username: The account username. :param raw_password: The account password. """ if self.current_session is not None: raise SessionOngoing try: account = Account.load(username, raw_password) except InvalidCredentials: self.failed_attempts += 1 if self.failed_attempts >= SessionManager.MAX_FAILED_ATTEMPTS: self.hardlock() raise except InvalidAccountFormat: self.failed_attempts += 1 if self.failed_attempts >= SessionManager.MAX_FAILED_ATTEMPTS: self.hardlock() raise self.failed_attempts = 0 self.hardlock_delay = SessionManager.INIT_HARDLOCK_DELAY self.current_session = Session(account) def stop_session(self): """ Stop the current session. Basically the equivalent of Logout. """ del self.current_session self.current_session = None def save_account(self): """Save account data to account file.""" try: self.current_session.account.dump() except AttributeError: raise NoSession
multiprocess_vector_env.py
from multiprocessing import Pipe
from multiprocessing import Process
import signal
import warnings

from cached_property import cached_property
import numpy as np

import chainerrl


def worker(remote, env_fn):
    """Subprocess entry point: build one env and serve commands over a pipe.

    Runs a blocking receive loop until a 'close' command arrives.  Each
    command is answered with exactly one `remote.send`, which the parent
    relies on when it pairs every send with a recv.

    :param remote: multiprocessing Connection used to receive
        ('step', 'reset', 'close', 'get_spaces', 'spec', 'seed') commands
        and send back their results.
    :param env_fn: zero-argument callable that constructs the env inside
        this process.
    """
    # Ignore CTRL+C in the worker process
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    env = env_fn()
    try:
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                ob, reward, done, info = env.step(data)
                remote.send((ob, reward, done, info))
            elif cmd == 'reset':
                ob = env.reset()
                remote.send(ob)
            elif cmd == 'close':
                # Close our end of the pipe and leave the loop; the
                # finally clause below closes the env itself.
                remote.close()
                break
            elif cmd == 'get_spaces':
                remote.send((env.action_space, env.observation_space))
            elif cmd == 'spec':
                remote.send(env.spec)
            elif cmd == 'seed':
                remote.send(env.seed(data))
            else:
                raise NotImplementedError
    finally:
        # Always release env resources, even if the loop dies on an error.
        env.close()


class MultiprocessVectorEnv(chainerrl.env.VectorEnv):
    """VectorEnv where each env is run in its own subprocess.

    Args:
        env_fns (list of callable): List of callables, each of which
            returns gym.Env that is run in its own subprocess.
    """

    def __init__(self, env_fns):
        if np.__version__ == '1.16.0':
            warnings.warn("""
NumPy 1.16.0 can cause severe memory leak in chainerrl.envs.MultiprocessVectorEnv.
We recommend using other versions of NumPy.
See https://github.com/numpy/numpy/issues/12793 for details.
""")  # NOQA
        nenvs = len(env_fns)
        # One Pipe per env: `remotes` stay in the parent, `work_remotes`
        # are handed to the worker processes.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = \
            [Process(target=worker, args=(work_remote, env_fn))
             for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
        for p in self.ps:
            p.start()
        # Cached last observation per env; used by reset() masking.
        self.last_obs = [None] * self.num_envs
        # Spaces are assumed identical across envs, so query only env 0.
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
        self.closed = False

    def __del__(self):
        # Best-effort cleanup if the user forgot to call close().
        if not self.closed:
            self.close()

    @cached_property
    def spec(self):
        # Queried lazily from the first worker and cached thereafter.
        self._assert_not_closed()
        self.remotes[0].send(('spec', None))
        spec = self.remotes[0].recv()
        return spec

    def step(self, actions):
        """Step all envs in parallel: send every action, then collect."""
        self._assert_not_closed()
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        results = [remote.recv() for remote in self.remotes]
        self.last_obs, rews, dones, infos = zip(*results)
        return self.last_obs, rews, dones, infos

    def reset(self, mask=None):
        """Reset envs; entries with a truthy mask keep their last obs."""
        self._assert_not_closed()
        if mask is None:
            # Default: reset every env (all-zeros mask is all-falsy).
            mask = np.zeros(self.num_envs)
        for m, remote in zip(mask, self.remotes):
            if not m:
                remote.send(('reset', None))
        # Only recv from envs that were actually sent a 'reset'.
        obs = [remote.recv() if not m else o for m, remote, o
               in zip(mask, self.remotes, self.last_obs)]
        self.last_obs = obs
        return obs

    def close(self):
        """Shut down all workers; may only be called once."""
        self._assert_not_closed()
        self.closed = True
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    def seed(self, seeds=None):
        """Seed every env.

        `seeds` may be None (no seeding), a single int (broadcast to all
        envs), or a list with exactly one seed per env.
        """
        self._assert_not_closed()
        if seeds is not None:
            if isinstance(seeds, int):
                seeds = [seeds] * self.num_envs
            elif isinstance(seeds, list):
                if len(seeds) != self.num_envs:
                    raise ValueError(
                        "length of seeds must be same as num_envs {}"
                        .format(self.num_envs))
            else:
                raise TypeError(
                    "Type of Seeds {} is not supported.".format(type(seeds)))
        else:
            seeds = [None] * self.num_envs
        for remote, seed in zip(self.remotes, seeds):
            remote.send(('seed', seed))
        results = [remote.recv() for remote in self.remotes]
        return results

    @property
    def num_envs(self):
        return len(self.remotes)

    def _assert_not_closed(self):
        assert not self.closed, "This env is already closed"
train.py
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016

"""Train"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import cv2
from datetime import datetime
import os.path
import sys
import time

import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading

from config import *
from dataset import pascal_voc, kitti
from utils.util import sparse_to_dense, bgr_to_rgb, bbox_transform
from nets import *

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('dataset', 'KITTI',
                           """Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
                           """ Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
                           """VOC challenge year. 2007 or 2012"""
                           """Only used for Pascal VOC dataset""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/bichen/logs/squeezeDet/train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
                           """Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
                           """Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 10,
                            """Number of steps to save summary.""")
# Fixed copy-pasted help text: this flag controls checkpoints, not summaries.
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
                            """Number of steps to save checkpoint.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")


def _draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, form='center'):
  """Draw bounding boxes and text labels onto image `im` in place.

  Args:
    im: image array to draw on (modified in place).
    box_list: list of bounding boxes.
    label_list: list of label strings, one per box, formatted "CLASS: (PROB)".
    color: default BGR color used when the class is not in `cdict`.
    cdict: optional {class_name: color} override map.
    form: 'center' ([cx, cy, w, h], converted via bbox_transform) or
        'diagonal' ([xmin, ymin, xmax, ymax]).
  """
  assert form == 'center' or form == 'diagonal', \
      'bounding box format not accepted: {}.'.format(form)

  for bbox, label in zip(box_list, label_list):

    if form == 'center':
      bbox = bbox_transform(bbox)

    xmin, ymin, xmax, ymax = [int(b) for b in bbox]

    l = label.split(':')[0]  # text before "CLASS: (PROB)"
    if cdict and l in cdict:
      c = cdict[l]
    else:
      c = color

    # draw box
    cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
    # draw label
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)


def _viz_prediction_result(model, images, bboxes, labels, batch_det_bbox,
                           batch_det_class, batch_det_prob):
  """Overlay ground truth (green) and filtered detections (red) on images."""
  mc = model.mc

  for i in range(len(images)):
    # draw ground truth
    _draw_box(
        images[i], bboxes[i],
        [mc.CLASS_NAMES[idx] for idx in labels[i]],
        (0, 255, 0))

    # draw prediction
    det_bbox, det_prob, det_class = model.filter_prediction(
        batch_det_bbox[i], batch_det_prob[i], batch_det_class[i])

    # keep only detections above the plotting threshold
    keep_idx = [idx for idx in range(len(det_prob))
                if det_prob[idx] > mc.PLOT_PROB_THRESH]
    det_bbox = [det_bbox[idx] for idx in keep_idx]
    det_prob = [det_prob[idx] for idx in keep_idx]
    det_class = [det_class[idx] for idx in keep_idx]

    _draw_box(
        images[i], det_bbox,
        [mc.CLASS_NAMES[idx]+': (%.2f)'% prob
            for idx, prob in zip(det_class, det_prob)],
        (0, 0, 255))


def train():
  """Train SqueezeDet model"""
  assert FLAGS.dataset == 'KITTI', \
      'Currently only support KITTI dataset'

  os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

  with tf.Graph().as_default():

    assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
        or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \
        'Selected neural net architecture not supported: {}'.format(FLAGS.net)
    if FLAGS.net == 'vgg16':
      mc = kitti_vgg16_config()
      mc.IS_TRAINING = True
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      model = VGG16ConvDet(mc)
    elif FLAGS.net == 'resnet50':
      mc = kitti_res50_config()
      mc.IS_TRAINING = True
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      model = ResNet50ConvDet(mc)
    elif FLAGS.net == 'squeezeDet':
      mc = kitti_squeezeDet_config()
      mc.IS_TRAINING = True
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      model = SqueezeDet(mc)
    elif FLAGS.net == 'squeezeDet+':
      mc = kitti_squeezeDetPlus_config()
      mc.IS_TRAINING = True
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      model = SqueezeDetPlus(mc)

    imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)

    # save model size, flops, activations by layers
    # NOTE: the `with` block closes the file; the redundant f.close() that
    # used to sit inside it was removed.
    with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
      f.write('Number of parameter by layer:\n')
      count = 0
      for c in model.model_size_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))

      count = 0
      f.write('\nActivation size by layer:\n')
      for c in model.activation_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))

      count = 0
      f.write('\nNumber of flops by layer:\n')
      for c in model.flop_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))
    print ('Model statistics saved to {}.'.format(
      os.path.join(FLAGS.train_dir, 'model_metrics.txt')))

    def _load_data(load_to_placeholder=True):
      """Read one batch and densify the sparse annotations into a feed_dict.

      Returns (feed_dict, image_per_batch, label_per_batch, bbox_per_batch).
      When two labels map to the same anchor, only the first is kept.
      """
      # read batch input
      image_per_batch, label_per_batch, box_delta_per_batch, aidx_per_batch, \
          bbox_per_batch = imdb.read_batch()

      label_indices, bbox_indices, box_delta_values, mask_indices, box_values, \
          = [], [], [], [], []
      aidx_set = set()
      num_discarded_labels = 0
      num_labels = 0
      for i in range(len(label_per_batch)):  # batch_size
        for j in range(len(label_per_batch[i])):  # number of annotations
          num_labels += 1
          if (i, aidx_per_batch[i][j]) not in aidx_set:
            aidx_set.add((i, aidx_per_batch[i][j]))
            label_indices.append(
                [i, aidx_per_batch[i][j], label_per_batch[i][j]])
            mask_indices.append([i, aidx_per_batch[i][j]])
            bbox_indices.extend(
                [[i, aidx_per_batch[i][j], k] for k in range(4)])
            box_delta_values.extend(box_delta_per_batch[i][j])
            box_values.extend(bbox_per_batch[i][j])
          else:
            num_discarded_labels += 1

      if mc.DEBUG_MODE:
        print ('Warning: Discarded {}/({}) labels that are assigned to the same '
               'anchor'.format(num_discarded_labels, num_labels))

      if load_to_placeholder:
        image_input = model.ph_image_input
        input_mask = model.ph_input_mask
        box_delta_input = model.ph_box_delta_input
        box_input = model.ph_box_input
        labels = model.ph_labels
      else:
        image_input = model.image_input
        input_mask = model.input_mask
        box_delta_input = model.box_delta_input
        box_input = model.box_input
        labels = model.labels

      feed_dict = {
          image_input: image_per_batch,
          input_mask: np.reshape(
              sparse_to_dense(
                  mask_indices,
                  [mc.BATCH_SIZE, mc.ANCHORS],
                  [1.0]*len(mask_indices)),
              [mc.BATCH_SIZE, mc.ANCHORS, 1]),
          box_delta_input: sparse_to_dense(
              bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
              box_delta_values),
          box_input: sparse_to_dense(
              bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
              box_values),
          labels: sparse_to_dense(
              label_indices,
              [mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
              [1.0]*len(label_indices)),
      }

      return feed_dict, image_per_batch, label_per_batch, bbox_per_batch

    def _enqueue(sess, coord):
      """Feeder-thread body: keep pushing batches into the model's queue."""
      try:
        while not coord.should_stop():
          feed_dict, _, _, _ = _load_data()
          sess.run(model.enqueue_op, feed_dict=feed_dict)
          if mc.DEBUG_MODE:
            print ("added to the queue")
        if mc.DEBUG_MODE:
          print ("Finished enqueue")
      # Python 3-compatible syntax (`except Exception, e` is Python 2-only).
      except Exception as e:
        coord.request_stop(e)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    # Initialize variables BEFORE restoring a checkpoint; the previous order
    # (restore, then init) reinitialized the restored weights, defeating
    # checkpoint resumption.
    init = tf.global_variables_initializer()
    sess.run(init)

    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)

    coord = tf.train.Coordinator()

    if mc.NUM_THREAD > 0:
      enq_threads = []
      for _ in range(mc.NUM_THREAD):
        enq_thread = threading.Thread(target=_enqueue, args=[sess, coord])
        # enq_thread.isDaemon()
        enq_thread.start()
        enq_threads.append(enq_thread)

    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    run_options = tf.RunOptions(timeout_in_ms=60000)

    # try:
    for step in xrange(FLAGS.max_steps):
      if coord.should_stop():
        sess.run(model.FIFOQueue.close(cancel_pending_enqueues=True))
        coord.request_stop()
        coord.join(threads)
        break

      start_time = time.time()

      if step % FLAGS.summary_step == 0:
        # Summary steps bypass the queue: load a batch into placeholders so
        # the visualized images match the ones that were just trained on.
        feed_dict, image_per_batch, label_per_batch, bbox_per_batch = \
            _load_data(load_to_placeholder=False)
        op_list = [
            model.train_op, model.loss, summary_op, model.det_boxes,
            model.det_probs, model.det_class, model.conf_loss,
            model.bbox_loss, model.class_loss
        ]
        _, loss_value, summary_str, det_boxes, det_probs, det_class, \
            conf_loss, bbox_loss, class_loss = sess.run(
                op_list, feed_dict=feed_dict)

        _viz_prediction_result(
            model, image_per_batch, bbox_per_batch, label_per_batch,
            det_boxes, det_class, det_probs)
        image_per_batch = bgr_to_rgb(image_per_batch)
        viz_summary = sess.run(
            model.viz_op, feed_dict={model.image_to_show: image_per_batch})

        summary_writer.add_summary(summary_str, step)
        summary_writer.add_summary(viz_summary, step)
        summary_writer.flush()

        print ('conf_loss: {}, bbox_loss: {}, class_loss: {}'.
            format(conf_loss, bbox_loss, class_loss))
      else:
        if mc.NUM_THREAD > 0:
          _, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
              [model.train_op, model.loss, model.conf_loss, model.bbox_loss,
               model.class_loss], options=run_options)
        else:
          feed_dict, _, _, _ = _load_data(load_to_placeholder=False)
          _, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
              [model.train_op, model.loss, model.conf_loss, model.bbox_loss,
               model.class_loss], feed_dict=feed_dict)

      duration = time.time() - start_time

      assert not np.isnan(loss_value), \
          'Model diverged. Total loss: {}, conf_loss: {}, bbox_loss: {}, ' \
          'class_loss: {}'.format(loss_value, conf_loss, bbox_loss, class_loss)

      if step % 10 == 0:
        num_images_per_step = mc.BATCH_SIZE
        images_per_sec = num_images_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             images_per_sec, sec_per_batch))
        sys.stdout.flush()

      # Save the model checkpoint periodically.
      if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
    # except Exception, e:
    #   coord.request_stop(e)
    # finally:
    #   coord.request_stop()
    #   coord.join(threads)


def main(argv=None):  # pylint: disable=unused-argument
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  tf.app.run()
test_mix.py
import pdb
import copy
import pytest
import threading
import datetime
import logging
from time import sleep
from multiprocessing import Process
import sklearn.preprocessing
from utils.utils import *
from common.common_type import CaseLabel

# Shared fixtures: 10k unit-normalized random vectors reused by every test.
index_file_size = 10
vectors = gen_vectors(10000, default_dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
top_k = 1
nprobe = 1
epsilon = 0.001
nlist = 128
# index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384}
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 16384}, "metric_type": "L2"}


class TestMixBase:
    """Mixed-operation tests (insert + index + search) against a Milvus server.

    All tests are currently disabled via the leading underscore in their
    names, so pytest does not collect them.
    """

    # TODO
    @pytest.mark.tags(CaseLabel.L2)
    def _test_mix_base(self, connect, collection):
        # Insert, flush, index, then search and expect the inserted vector
        # back as its own nearest neighbor (distance ~0).
        nb = 200000
        nq = 5
        entities = gen_entities(nb=nb)
        ids = connect.insert(collection, entities)
        assert len(ids) == nb
        connect.flush([collection])
        connect.create_index(collection, default_float_vec_field_name, default_index)
        index = connect.describe_index(collection, "")
        create_target_index(default_index, default_float_vec_field_name)
        assert index == default_index
        query, vecs = gen_query_vectors(default_float_vec_field_name, entities, default_top_k, nq)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        # self-match distance should be ~0 for normalized L2 vectors
        assert res[0]._distances[0] <= epsilon
        assert check_id_result(res[0], ids[0])

    # disable
    @pytest.mark.tags(CaseLabel.L2)
    def _test_search_during_createIndex(self, args):
        # Search in one process while another keeps inserting, to exercise
        # concurrent access; each process gets its own client instance.
        loops = 10000
        collection = gen_unique_str()
        query_vecs = [vectors[0], vectors[1]]
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        id_0 = 0; id_1 = 0
        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        milvus_instance.create_collection({'collection_name': collection,
                                           'dimension': default_dim,
                                           'index_file_size': index_file_size,
                                           'metric_type': "L2"})
        for i in range(10):
            status, ids = milvus_instance.bulk_insert(collection, vectors)
            # logging.getLogger().info(ids)
            if i == 0:
                # remember the ids of the first two vectors ever inserted;
                # the search process asserts they stay the top-1 hits
                id_0 = ids[0]; id_1 = ids[1]
        # def create_index(milvus_instance):
        #     logging.getLogger().info("In create index")
        #     status = milvus_instance.create_index(collection, index_params)
        #     logging.getLogger().info(status)
        #     status, result = milvus_instance.get_index_info(collection)
        #     logging.getLogger().info(result)

        def insert(milvus_instance):
            # writer process body
            logging.getLogger().info("In add vectors")
            status, ids = milvus_instance.bulk_insert(collection, vectors)
            logging.getLogger().info(status)

        def search(milvus_instance):
            # reader process body: repeated searches must keep returning
            # the original top-1 ids even while inserts are in flight
            logging.getLogger().info("In search vectors")
            for i in range(loops):
                status, result = milvus_instance.search(collection, top_k, nprobe, query_vecs)
                logging.getLogger().info(status)
                assert result[0][0].id == id_0
                assert result[1][0].id == id_1

        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        p_search = Process(target=search, args=(milvus_instance,))
        p_search.start()

        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        p_create = Process(target=insert, args=(milvus_instance,))
        p_create.start()
        p_create.join()

    @pytest.mark.tags(CaseLabel.L2)
    def _test_mix_multi_collections(self, connect):
        '''
        target: test functions with multiple collections of different metric_types and index_types
        method: create 60 collections which 30 are L2 and the other are IP, add vectors into them
              and test describe index and search
        expected: status ok
        '''
        nq = 10000
        collection_list = []
        # `idx` stores 3 reference ids per collection (positions 0/10/20),
        # consumed later as idx[3*i + j] during the search check.
        idx = []
        index_param = {'nlist': nlist}

        # create collection and add vectors
        # collections 0-29: L2 metric
        for i in range(30):
            collection_name = gen_unique_str('test_mix_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': default_dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_collection(param)
            status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors)
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # collections 30-59: IP metric
        for i in range(30):
            collection_name = gen_unique_str('test_mix_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': default_dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_collection(param)
            status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors)
            assert status.OK()
            status = connect.flush([collection_name])
            assert status.OK()
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # index types by slot within each metric group of 30:
        # [0:10) FLAT, [10:20) IVFLAT, [20:30) IVF_SQ8
        for i in range(10):
            status = connect.create_index(collection_list[i], IndexType.FLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[30 + i], IndexType.FLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[10 + i], IndexType.IVFLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[40 + i], IndexType.IVFLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[20 + i], IndexType.IVF_SQ8, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[50 + i], IndexType.IVF_SQ8, index_param)
            assert status.OK()

        # describe index
        for i in range(10):
            status, result = connect.get_index_info(collection_list[i])
            assert result._index_type == IndexType.FLAT
            status, result = connect.get_index_info(collection_list[10 + i])
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.get_index_info(collection_list[20 + i])
            assert result._index_type == IndexType.IVF_SQ8
            status, result = connect.get_index_info(collection_list[30 + i])
            assert result._index_type == IndexType.FLAT
            status, result = connect.get_index_info(collection_list[40 + i])
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.get_index_info(collection_list[50 + i])
            assert result._index_type == IndexType.IVF_SQ8

        # search
        query_vecs = [vectors[0], vectors[10], vectors[20]]
        for i in range(60):
            collection = collection_list[i]
            status, result = connect.search(collection, top_k, query_records=query_vecs, params={"nprobe": 1})
            assert status.OK()
            assert len(result) == len(query_vecs)
            logging.getLogger().info(i)
            for j in range(len(query_vecs)):
                assert len(result[j]) == top_k
            for j in range(len(query_vecs)):
                # log the mismatch before asserting, to aid debugging
                if not check_result(result[j], idx[3 * i + j]):
                    logging.getLogger().info(result[j]._id_list)
                    logging.getLogger().info(idx[3 * i + j])
                assert check_result(result[j], idx[3 * i + j])


def check_result(result, id):
    """Return True if `id` is among the first (up to) 5 hit ids of `result`."""
    if len(result) >= 5:
        return id in [result[0].id, result[1].id, result[2].id, result[3].id, result[4].id]
    else:
        return id in (i.id for i in result)


def check_id_result(result, id):
    """Return True if `id` is among the first (up to) `limit_in` entity ids."""
    limit_in = 5
    ids = [entity.id for entity in result]
    if len(result) >= limit_in:
        return id in ids[:limit_in]
    else:
        return id in ids
loader.py
""" The Salt loader is the core to Salt's plugin system, the loader scans directories for python loadable code and organizes the code into the plugin interfaces used by Salt. """ import contextvars import copy import functools import importlib.machinery # pylint: disable=no-name-in-module,import-error import importlib.util # pylint: disable=no-name-in-module,import-error import inspect import logging import os import re import sys import tempfile import threading import time import traceback import types from collections.abc import MutableMapping from zipimport import zipimporter import salt.config import salt.defaults.events import salt.defaults.exitcodes import salt.loader_context import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.dictupdate import salt.utils.event import salt.utils.files import salt.utils.lazy import salt.utils.odict import salt.utils.platform import salt.utils.stringutils import salt.utils.versions from salt.exceptions import LoaderError from salt.ext import six from salt.ext.six.moves import reload_module from salt.template import check_render_pipe_str from salt.utils.decorators import Depends try: import pkg_resources HAS_PKG_RESOURCES = True except ImportError: HAS_PKG_RESOURCES = False log = logging.getLogger(__name__) SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR) LOADED_BASE_NAME = "salt.loaded" # pylint: disable=no-member MODULE_KIND_SOURCE = 1 MODULE_KIND_COMPILED = 2 MODULE_KIND_EXTENSION = 3 MODULE_KIND_PKG_DIRECTORY = 5 SUFFIXES = [] for suffix in importlib.machinery.EXTENSION_SUFFIXES: SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION)) for suffix in importlib.machinery.SOURCE_SUFFIXES: SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE)) for suffix in importlib.machinery.BYTECODE_SUFFIXES: SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED)) MODULE_KIND_MAP = { MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader, MODULE_KIND_COMPILED: 
importlib.machinery.SourcelessFileLoader, MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader, } # pylint: enable=no-member PY3_PRE_EXT = re.compile(r"\.cpython-{}{}(\.opt-[1-9])?".format(*sys.version_info[:2])) # Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *` # which simplifies code readability, it adds some unsupported functions into # the driver's module scope. # We list un-supported functions here. These will be removed from the loaded. # TODO: remove the need for this cross-module code. Maybe use NotImplemented LIBCLOUD_FUNCS_NOT_SUPPORTED = ( "parallels.avail_sizes", "parallels.avail_locations", "proxmox.avail_sizes", ) # Will be set to pyximport module at runtime if cython is enabled in config. pyximport = None def static_loader( opts, ext_type, tag, pack=None, int_type=None, ext_dirs=True, ext_type_dirs=None, base_path=None, filter_name=None, ): funcs = LazyLoader( _module_dirs( opts, ext_type, tag, int_type, ext_dirs, ext_type_dirs, base_path, ), opts, tag=tag, pack=pack, ) ret = {} funcs._load_all() if filter_name: funcs = FilterDictWrapper(funcs, filter_name) for key in funcs: ret[key] = funcs[key] return ret def _format_entrypoint_target(ep): """ Makes a string describing the target of an EntryPoint object. Base strongly on EntryPoint.__str__(). 
""" s = ep.module_name if ep.attrs: s += ":" + ".".join(ep.attrs) return s def _module_dirs( opts, ext_type, tag=None, int_type=None, ext_dirs=True, ext_type_dirs=None, base_path=None, ): if tag is None: tag = ext_type sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type) ext_types = os.path.join(opts["extension_modules"], ext_type) ext_type_types = [] if ext_dirs: if ext_type_dirs is None: ext_type_dirs = "{}_dirs".format(tag) if ext_type_dirs in opts: ext_type_types.extend(opts[ext_type_dirs]) if HAS_PKG_RESOURCES and ext_type_dirs: for entry_point in pkg_resources.iter_entry_points( "salt.loader", ext_type_dirs ): try: loaded_entry_point = entry_point.load() for path in loaded_entry_point(): ext_type_types.append(path) except Exception as exc: # pylint: disable=broad-except log.error( "Error getting module directories from %s: %s", _format_entrypoint_target(entry_point), exc, ) log.debug( "Full backtrace for module directories error", exc_info=True ) cli_module_dirs = [] # The dirs can be any module dir, or a in-tree _{ext_type} dir for _dir in opts.get("module_dirs", []): # Prepend to the list to match cli argument ordering maybe_dir = os.path.join(_dir, ext_type) if os.path.isdir(maybe_dir): cli_module_dirs.insert(0, maybe_dir) continue maybe_dir = os.path.join(_dir, "_{}".format(ext_type)) if os.path.isdir(maybe_dir): cli_module_dirs.insert(0, maybe_dir) return cli_module_dirs + ext_type_types + [ext_types, sys_types] def minion_mods( opts, context=None, utils=None, whitelist=None, initial_load=False, loaded_base_name=None, notify=False, static_modules=None, proxy=None, ): """ Load execution modules Returns a dictionary of execution modules appropriate for the current system by evaluating the __virtual__() function in each module. 
:param dict opts: The Salt options dictionary :param dict context: A Salt context that should be made present inside generated modules in __context__ :param dict utils: Utility functions which should be made available to Salt modules in __utils__. See `utils_dirs` in salt.config for additional information about configuration. :param list whitelist: A list of modules which should be whitelisted. :param bool initial_load: Deprecated flag! Unused. :param str loaded_base_name: A string marker for the loaded base name. :param bool notify: Flag indicating that an event should be fired upon completion of module loading. .. code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') __grains__ = salt.loader.grains(__opts__) __opts__['grains'] = __grains__ __utils__ = salt.loader.utils(__opts__) __salt__ = salt.loader.minion_mods(__opts__, utils=__utils__) __salt__['test.ping']() """ # TODO Publish documentation for module whitelisting if not whitelist: whitelist = opts.get("whitelist_modules", None) ret = LazyLoader( _module_dirs(opts, "modules", "module"), opts, tag="module", pack={"__context__": context, "__utils__": utils, "__proxy__": proxy}, whitelist=whitelist, loaded_base_name=loaded_base_name, static_modules=static_modules, extra_module_dirs=utils.module_dirs if utils else None, pack_self="__salt__", ) # Load any provider overrides from the configuration file providers option # Note: Providers can be pkg, service, user or group - not to be confused # with cloud providers. 
providers = opts.get("providers", False) if providers and isinstance(providers, dict): for mod in providers: # sometimes providers opts is not to diverge modules but # for other configuration try: funcs = raw_mod(opts, providers[mod], ret) except TypeError: break else: if funcs: for func in funcs: f_key = "{}{}".format(mod, func[func.rindex(".") :]) ret[f_key] = funcs[func] if notify: with salt.utils.event.get_event("minion", opts=opts, listen=False) as evt: evt.fire_event( {"complete": True}, tag=salt.defaults.events.MINION_MOD_REFRESH_COMPLETE ) return ret def raw_mod(opts, name, functions, mod="modules"): """ Returns a single module loaded raw and bypassing the __virtual__ function .. code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') testmod = salt.loader.raw_mod(__opts__, 'test', None) testmod['test.ping']() """ loader = LazyLoader( _module_dirs(opts, mod, "module"), opts, tag="rawmodule", virtual_enable=False, pack={"__salt__": functions}, ) # if we don't have the module, return an empty dict if name not in loader.file_mapping: return {} loader._load_module(name) # load a single module (the one passed in) return dict(loader._dict) # return a copy of *just* the funcs for `name` def metaproxy(opts, loaded_base_name=None): """ Return functions used in the meta proxy """ return LazyLoader( _module_dirs(opts, "metaproxy"), opts, tag="metaproxy", loaded_base_name=loaded_base_name, ) def matchers(opts): """ Return the matcher services plugins """ return LazyLoader(_module_dirs(opts, "matchers"), opts, tag="matchers") def engines(opts, functions, runners, utils, proxy=None): """ Return the master services plugins """ pack = { "__salt__": functions, "__runners__": runners, "__proxy__": proxy, "__utils__": utils, } return LazyLoader( _module_dirs(opts, "engines"), opts, tag="engines", pack=pack, extra_module_dirs=utils.module_dirs if utils else None, ) def proxy( opts, functions=None, returners=None, 
whitelist=None, utils=None, context=None, pack_self="__proxy__", ): """ Returns the proxy module for this salt-proxy-minion """ return LazyLoader( _module_dirs(opts, "proxy"), opts, tag="proxy", pack={ "__salt__": functions, "__ret__": returners, "__utils__": utils, "__context__": context, }, extra_module_dirs=utils.module_dirs if utils else None, pack_self=pack_self, ) def returners(opts, functions, whitelist=None, context=None, proxy=None): """ Returns the returner modules """ return LazyLoader( _module_dirs(opts, "returners", "returner"), opts, tag="returner", whitelist=whitelist, pack={"__salt__": functions, "__context__": context, "__proxy__": proxy or {}}, ) def utils(opts, whitelist=None, context=None, proxy=proxy, pack_self=None): """ Returns the utility modules """ return LazyLoader( _module_dirs(opts, "utils", ext_type_dirs="utils_dirs"), opts, tag="utils", whitelist=whitelist, pack={"__context__": context, "__proxy__": proxy or {}}, pack_self=pack_self, ) def pillars(opts, functions, context=None): """ Returns the pillars modules """ _utils = utils(opts) ret = LazyLoader( _module_dirs(opts, "pillar"), opts, tag="pillar", pack={"__salt__": functions, "__context__": context, "__utils__": _utils}, extra_module_dirs=_utils.module_dirs, pack_self="__ext_pillar__", ) return FilterDictWrapper(ret, ".ext_pillar") def tops(opts): """ Returns the tops modules """ if "master_tops" not in opts: return {} whitelist = list(opts["master_tops"].keys()) ret = LazyLoader( _module_dirs(opts, "tops", "top"), opts, tag="top", whitelist=whitelist, ) return FilterDictWrapper(ret, ".top") def wheels(opts, whitelist=None, context=None): """ Returns the wheels modules """ if context is None: context = {} return LazyLoader( _module_dirs(opts, "wheel"), opts, tag="wheel", whitelist=whitelist, pack={"__context__": context}, ) def outputters(opts): """ Returns the outputters modules :param dict opts: The Salt options dictionary :returns: LazyLoader instance, with only outputters 
present in the keyspace """ ret = LazyLoader( _module_dirs(opts, "output", ext_type_dirs="outputter_dirs"), opts, tag="output", ) wrapped_ret = FilterDictWrapper(ret, ".output") # TODO: this name seems terrible... __salt__ should always be execution mods ret.pack["__salt__"] = wrapped_ret return wrapped_ret def serializers(opts): """ Returns the serializers modules :param dict opts: The Salt options dictionary :returns: LazyLoader instance, with only serializers present in the keyspace """ return LazyLoader(_module_dirs(opts, "serializers"), opts, tag="serializers",) def eauth_tokens(opts): """ Returns the tokens modules :param dict opts: The Salt options dictionary :returns: LazyLoader instance, with only token backends present in the keyspace """ return LazyLoader(_module_dirs(opts, "tokens"), opts, tag="tokens",) def auth(opts, whitelist=None): """ Returns the auth modules :param dict opts: The Salt options dictionary :returns: LazyLoader """ return LazyLoader( _module_dirs(opts, "auth"), opts, tag="auth", whitelist=whitelist, pack={"__salt__": minion_mods(opts)}, ) def fileserver(opts, backends): """ Returns the file server modules """ _utils = utils(opts) if backends is not None: if not isinstance(backends, list): backends = [backends] # If backend is a VCS, add both the '-fs' and non '-fs' versions to the list. 
# Use a set to keep them unique backend_set = set() vcs_re = re.compile("^(git|svn|hg)(?:fs)?$") for backend in backends: match = vcs_re.match(backend) if match: backend_set.add(match.group(1)) backend_set.add(match.group(1) + "fs") else: backend_set.add(backend) backends = list(backend_set) return LazyLoader( _module_dirs(opts, "fileserver"), opts, tag="fileserver", whitelist=backends, pack={"__utils__": _utils}, extra_module_dirs=_utils.module_dirs, ) def roster(opts, runner=None, utils=None, whitelist=None): """ Returns the roster modules """ return LazyLoader( _module_dirs(opts, "roster"), opts, tag="roster", whitelist=whitelist, pack={"__runner__": runner, "__utils__": utils}, extra_module_dirs=utils.module_dirs if utils else None, ) def thorium(opts, functions, runners): """ Load the thorium runtime modules """ pack = {"__salt__": functions, "__runner__": runners, "__context__": {}} ret = LazyLoader(_module_dirs(opts, "thorium"), opts, tag="thorium", pack=pack) ret.pack["__thorium__"] = ret return ret def states( opts, functions, utils, serializers, whitelist=None, proxy=None, context=None ): """ Returns the state modules :param dict opts: The Salt options dictionary :param dict functions: A dictionary of minion modules, with module names as keys and funcs as values. .. 
code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') statemods = salt.loader.states(__opts__, None, None) """ if context is None: context = {} return LazyLoader( _module_dirs(opts, "states"), opts, tag="states", pack={ "__salt__": functions, "__proxy__": proxy or {}, "__utils__": utils, "__serializers__": serializers, "__context__": context, }, whitelist=whitelist, extra_module_dirs=utils.module_dirs if utils else None, pack_self="__states__", ) def beacons(opts, functions, context=None, proxy=None): """ Load the beacon modules :param dict opts: The Salt options dictionary :param dict functions: A dictionary of minion modules, with module names as keys and funcs as values. """ return LazyLoader( _module_dirs(opts, "beacons"), opts, tag="beacons", pack={"__context__": context, "__salt__": functions, "__proxy__": proxy or {}}, virtual_funcs=[], ) def log_handlers(opts): """ Returns the custom logging handler modules :param dict opts: The Salt options dictionary """ ret = LazyLoader( _module_dirs( opts, "log_handlers", int_type="handlers", base_path=os.path.join(SALT_BASE_PATH, "log"), ), opts, tag="log_handlers", ) return FilterDictWrapper(ret, ".setup_handlers") def ssh_wrapper(opts, functions=None, context=None): """ Returns the custom logging handler modules """ return LazyLoader( _module_dirs( opts, "wrapper", base_path=os.path.join(SALT_BASE_PATH, os.path.join("client", "ssh")), ), opts, tag="wrapper", pack={ "__salt__": functions, # "__grains__": opts.get("grains", {}), # "__pillar__": opts.get("pillar", {}), "__context__": context, }, ) def render(opts, functions, states=None, proxy=None, context=None): """ Returns the render modules """ if context is None: context = {} pack = { "__salt__": functions, "__grains__": opts.get("grains", {}), "__context__": context, } if states: pack["__states__"] = states if proxy is None: proxy = {} pack["__proxy__"] = proxy ret = LazyLoader( _module_dirs(opts, 
"renderers", "render", ext_type_dirs="render_dirs",), opts, tag="render", pack=pack, ) rend = FilterDictWrapper(ret, ".render") if not check_render_pipe_str( opts["renderer"], rend, opts["renderer_blacklist"], opts["renderer_whitelist"] ): err = ( "The renderer {} is unavailable, this error is often because " "the needed software is unavailable".format(opts["renderer"]) ) log.critical(err) raise LoaderError(err) return rend def grain_funcs(opts, proxy=None, context=None): """ Returns the grain functions .. code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') grainfuncs = salt.loader.grain_funcs(__opts__) """ _utils = utils(opts, proxy=proxy) pack = {"__utils__": utils(opts, proxy=proxy), "__context__": context} ret = LazyLoader( _module_dirs(opts, "grains", "grain", ext_type_dirs="grains_dirs",), opts, tag="grains", extra_module_dirs=_utils.module_dirs, pack=pack, ) ret.pack["__utils__"] = _utils return ret def _format_cached_grains(cached_grains): """ Returns cached grains with fixed types, like tuples. """ if cached_grains.get("osrelease_info"): osrelease_info = cached_grains["osrelease_info"] if isinstance(osrelease_info, list): cached_grains["osrelease_info"] = tuple(osrelease_info) return cached_grains def _load_cached_grains(opts, cfn): """ Returns the grains cached in cfn, or None if the cache is too old or is corrupted. """ if not os.path.isfile(cfn): log.debug("Grains cache file does not exist.") return None grains_cache_age = int(time.time() - os.path.getmtime(cfn)) if grains_cache_age > opts.get("grains_cache_expiration", 300): log.debug( "Grains cache last modified %s seconds ago and cache " "expiration is set to %s. Grains cache expired. 
" "Refreshing.", grains_cache_age, opts.get("grains_cache_expiration", 300), ) return None if opts.get("refresh_grains_cache", False): log.debug("refresh_grains_cache requested, Refreshing.") return None log.debug("Retrieving grains from cache") try: serial = salt.payload.Serial(opts) with salt.utils.files.fopen(cfn, "rb") as fp_: cached_grains = salt.utils.data.decode( serial.load(fp_), preserve_tuples=True ) if not cached_grains: log.debug("Cached grains are empty, cache might be corrupted. Refreshing.") return None return _format_cached_grains(cached_grains) except OSError: return None def grains(opts, force_refresh=False, proxy=None, context=None): """ Return the functions for the dynamic grains and the values for the static grains. Since grains are computed early in the startup process, grains functions do not have __salt__ or __proxy__ available. At proxy-minion startup, this function is called with the proxymodule LazyLoader object so grains functions can communicate with their controlled device. .. code-block:: python import salt.config import salt.loader __opts__ = salt.config.minion_config('/etc/salt/minion') __grains__ = salt.loader.grains(__opts__) print __grains__['id'] """ # Need to re-import salt.config, somehow it got lost when a minion is starting import salt.config # if we have no grains, lets try loading from disk (TODO: move to decorator?) cfn = os.path.join(opts["cachedir"], "grains.cache.p") if not force_refresh and opts.get("grains_cache", False): cached_grains = _load_cached_grains(opts, cfn) if cached_grains: return cached_grains else: log.debug("Grains refresh requested. 
Refreshing grains.") if opts.get("skip_grains", False): return {} grains_deep_merge = opts.get("grains_deep_merge", False) is True if "conf_file" in opts: pre_opts = {} pre_opts.update( salt.config.load_config( opts["conf_file"], "SALT_MINION_CONFIG", salt.config.DEFAULT_MINION_OPTS["conf_file"], ) ) default_include = pre_opts.get("default_include", opts["default_include"]) include = pre_opts.get("include", []) pre_opts.update( salt.config.include_config( default_include, opts["conf_file"], verbose=False ) ) pre_opts.update( salt.config.include_config(include, opts["conf_file"], verbose=True) ) if "grains" in pre_opts: opts["grains"] = pre_opts["grains"] else: opts["grains"] = {} else: opts["grains"] = {} grains_data = {} blist = opts.get("grains_blacklist", []) funcs = grain_funcs(opts, proxy=proxy, context=context or {}) if force_refresh: # if we refresh, lets reload grain modules funcs.clear() # Run core grains for key in funcs: if not key.startswith("core."): continue log.trace("Loading %s grain", key) ret = funcs[key]() if not isinstance(ret, dict): continue if blist: for key in list(ret): for block in blist: if salt.utils.stringutils.expr_match(key, block): del ret[key] log.trace("Filtering %s grain", key) if not ret: continue if grains_deep_merge: salt.utils.dictupdate.update(grains_data, ret) else: grains_data.update(ret) # Run the rest of the grains for key in funcs: if key.startswith("core.") or key == "_errors": continue try: # Grains are loaded too early to take advantage of the injected # __proxy__ variable. Pass an instance of that LazyLoader # here instead to grains functions if the grains functions take # one parameter. Then the grains can have access to the # proxymodule for retrieving information from the connected # device. 
log.trace("Loading %s grain", key) parameters = salt.utils.args.get_function_argspec(funcs[key]).args kwargs = {} if "proxy" in parameters: kwargs["proxy"] = proxy if "grains" in parameters: kwargs["grains"] = grains_data ret = funcs[key](**kwargs) except Exception: # pylint: disable=broad-except if salt.utils.platform.is_proxy(): log.info( "The following CRITICAL message may not be an error; the proxy may not be completely established yet." ) log.critical( "Failed to load grains defined in grain file %s in " "function %s, error:\n", key, funcs[key], exc_info=True, ) continue if not isinstance(ret, dict): continue if blist: for key in list(ret): for block in blist: if salt.utils.stringutils.expr_match(key, block): del ret[key] log.trace("Filtering %s grain", key) if not ret: continue if grains_deep_merge: salt.utils.dictupdate.update(grains_data, ret) else: grains_data.update(ret) if opts.get("proxy_merge_grains_in_module", True) and proxy: try: proxytype = proxy.opts["proxy"]["proxytype"] if proxytype + ".grains" in proxy: if ( proxytype + ".initialized" in proxy and proxy[proxytype + ".initialized"]() ): try: proxytype = proxy.opts["proxy"]["proxytype"] ret = proxy[proxytype + ".grains"]() if grains_deep_merge: salt.utils.dictupdate.update(grains_data, ret) else: grains_data.update(ret) except Exception: # pylint: disable=broad-except log.critical( "Failed to run proxy's grains function!", exc_info=True ) except KeyError: pass grains_data.update(opts["grains"]) # Write cache if enabled if opts.get("grains_cache", False): with salt.utils.files.set_umask(0o077): try: if salt.utils.platform.is_windows(): # Late import import salt.modules.cmdmod # Make sure cache file isn't read-only salt.modules.cmdmod._run_quiet('attrib -R "{}"'.format(cfn)) with salt.utils.files.fopen(cfn, "w+b") as fp_: try: serial = salt.payload.Serial(opts) serial.dump(grains_data, fp_) except TypeError as e: log.error("Failed to serialize grains cache: %s", e) raise # re-throw for cleanup 
except Exception as e: # pylint: disable=broad-except log.error("Unable to write to grains cache file %s: %s", cfn, e) # Based on the original exception, the file may or may not have been # created. If it was, we will remove it now, as the exception means # the serialized data is not to be trusted, no matter what the # exception is. if os.path.isfile(cfn): os.unlink(cfn) if grains_deep_merge: salt.utils.dictupdate.update(grains_data, opts["grains"]) else: grains_data.update(opts["grains"]) return salt.utils.data.decode(grains_data, preserve_tuples=True) # TODO: get rid of? Does anyone use this? You should use raw() instead def call(fun, **kwargs): """ Directly call a function inside a loader directory """ args = kwargs.get("args", []) dirs = kwargs.get("dirs", []) funcs = LazyLoader( [os.path.join(SALT_BASE_PATH, "modules")] + dirs, None, tag="modules", virtual_enable=False, ) return funcs[fun](*args) def runner(opts, utils=None, context=None, whitelist=None): """ Directly call a function inside a loader directory """ if utils is None: utils = {} if context is None: context = {} return LazyLoader( _module_dirs(opts, "runners", "runner", ext_type_dirs="runner_dirs"), opts, tag="runners", pack={"__utils__": utils, "__context__": context}, whitelist=whitelist, extra_module_dirs=utils.module_dirs if utils else None, # TODO: change from __salt__ to something else, we overload __salt__ too much pack_self="__salt__", ) def queues(opts): """ Directly call a function inside a loader directory """ return LazyLoader( _module_dirs(opts, "queues", "queue", ext_type_dirs="queue_dirs"), opts, tag="queues", ) def sdb(opts, functions=None, whitelist=None, utils=None): """ Make a very small database call """ if utils is None: utils = {} return LazyLoader( _module_dirs(opts, "sdb"), opts, tag="sdb", pack={ "__sdb__": functions, "__utils__": utils, "__salt__": minion_mods(opts, utils=utils), }, whitelist=whitelist, extra_module_dirs=utils.module_dirs if utils else None, ) def 
pkgdb(opts): """ Return modules for SPM's package database .. versionadded:: 2015.8.0 """ return LazyLoader( _module_dirs(opts, "pkgdb", base_path=os.path.join(SALT_BASE_PATH, "spm")), opts, tag="pkgdb", ) def pkgfiles(opts): """ Return modules for SPM's file handling .. versionadded:: 2015.8.0 """ return LazyLoader( _module_dirs(opts, "pkgfiles", base_path=os.path.join(SALT_BASE_PATH, "spm")), opts, tag="pkgfiles", ) def clouds(opts): """ Return the cloud functions """ _utils = salt.loader.utils(opts) # Let's bring __active_provider_name__, defaulting to None, to all cloud # drivers. This will get temporarily updated/overridden with a context # manager when needed. functions = LazyLoader( _module_dirs( opts, "clouds", "cloud", base_path=os.path.join(SALT_BASE_PATH, "cloud"), int_type="clouds", ), opts, tag="clouds", pack={"__utils__": _utils, "__active_provider_name__": None}, extra_module_dirs=_utils.module_dirs, ) for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED: log.trace( "'%s' has been marked as not supported. 
Removing from the " "list of supported cloud functions", funcname, ) functions.pop(funcname, None) return functions def netapi(opts): """ Return the network api functions """ return LazyLoader(_module_dirs(opts, "netapi"), opts, tag="netapi",) def executors(opts, functions=None, context=None, proxy=None): """ Returns the executor modules """ if proxy is None: proxy = {} if context is None: context = {} return LazyLoader( _module_dirs(opts, "executors", "executor"), opts, tag="executor", pack={"__salt__": functions, "__context__": context, "__proxy__": proxy}, pack_self="__executors__", ) def cache(opts, serial): """ Returns the returner modules """ return LazyLoader( _module_dirs(opts, "cache", "cache"), opts, tag="cache", pack={"__context__": {"serial": serial}}, ) def _generate_module(name): if name in sys.modules: return code = "'''Salt loaded {} parent module'''".format(name.split(".")[-1]) # ModuleType can't accept a unicode type on PY2 module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function exec(code, module.__dict__) sys.modules[name] = module def _mod_type(module_path): if module_path.startswith(SALT_BASE_PATH): return "int" return "ext" # TODO: move somewhere else? class FilterDictWrapper(MutableMapping): """ Create a dict which wraps another dict with a specific key suffix on get This is to replace "filter_load" """ def __init__(self, d, suffix): self._dict = d self.suffix = suffix def __setitem__(self, key, val): self._dict[key] = val def __delitem__(self, key): del self._dict[key] def __getitem__(self, key): return self._dict[key + self.suffix] def __len__(self): return len(self._dict) def __iter__(self): for key in self._dict: if key.endswith(self.suffix): yield key.replace(self.suffix, "") class LoadedFunc: """ The functions loaded by LazyLoader instances using subscript notation 'a[k]' will be wrapped with LoadedFunc. - Makes sure functions are called with the correct loader's context. 
- Provides access to a wrapped func's __global__ attribute :param func callable: The callable to wrap. :param dict loader: The loader to use in the context when the wrapped callable is called. """ def __init__(self, func, loader): self.func = func self.loader = loader functools.update_wrapper(self, func) def __getattr__(self, name): return getattr(self.func, name) def __call__(self, *args, **kwargs): if self.loader.inject_globals: run_func = global_injector_decorator(self.loader.inject_globals)(self.func) else: run_func = self.func return self.loader.run(run_func, *args, **kwargs) class LoadedMod: def __init__(self, mod, loader): """ Return the wrapped func's globals via this object's __globals__ attribute. """ self.mod = mod self.loader = loader def __getattr__(self, name): """ Run the wrapped function in the loader's context. """ attr = getattr(self.mod, name) if inspect.isfunction(attr) or inspect.ismethod(attr): return LoadedFunc(attr, self.loader) return attr class LazyLoader(salt.utils.lazy.LazyDict): """ A pseduo-dictionary which has a set of keys which are the name of the module and function, delimited by a dot. When the value of the key is accessed, the function is then loaded from disk and into memory. .. note:: Iterating over keys will cause all modules to be loaded. :param list module_dirs: A list of directories on disk to search for modules :param dict opts: The salt options dictionary. :param str tag: The tag for the type of module to load :param func mod_type_check: A function which can be used to verify files :param dict pack: A dictionary of function to be packed into modules as they are loaded :param list whitelist: A list of modules to whitelist :param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules. :param str virtual_funcs: The name of additional functions in the module to call to verify its functionality. If not true, the module will not load. 
:param list extra_module_dirs: A list of directories that will be able to import from :param str pack_self: Pack this module into a variable by this name into modules loaded :returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values are function references themselves which are loaded on-demand. # TODO: - move modules_max_memory into here - singletons (per tag) """ mod_dict_class = salt.utils.odict.OrderedDict def __init__( self, module_dirs, opts=None, tag="module", loaded_base_name=None, mod_type_check=None, pack=None, whitelist=None, virtual_enable=True, static_modules=None, proxy=None, virtual_funcs=None, extra_module_dirs=None, pack_self=None, ): # pylint: disable=W0231 """ In pack, if any of the values are None they will be replaced with an empty context-specific dict """ self.parent_loader = None self.inject_globals = {} self.pack = {} if pack is None else pack for i in self.pack: if isinstance(self.pack[i], salt.loader_context.NamedLoaderContext): self.pack[i] = self.pack[i].value() if opts is None: opts = {} threadsafety = not opts.get("multiprocessing") self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety) self.opts = self.__prep_mod_opts(opts) self.pack_self = pack_self self.module_dirs = module_dirs self.tag = tag self._gc_finalizer = None if loaded_base_name and loaded_base_name != LOADED_BASE_NAME: self.loaded_base_name = loaded_base_name else: self.loaded_base_name = LOADED_BASE_NAME self.mod_type_check = mod_type_check or _mod_type if "__context__" not in self.pack: self.pack["__context__"] = None for k, v in self.pack.items(): if v is None: # if the value of a pack is None, lets make an empty dict self.context_dict.setdefault(k, {}) self.pack[k] = salt.utils.context.NamespacedDictWrapper( self.context_dict, k ) self.whitelist = whitelist self.virtual_enable = virtual_enable self.initial_load = True # names of modules that we don't have (errors, __virtual__, etc.) 
self.missing_modules = {} # mapping of name -> error self.loaded_modules = {} # mapping of module_name -> dict_of_functions self.loaded_files = set() # TODO: just remove them from file_mapping? self.static_modules = static_modules if static_modules else [] if virtual_funcs is None: virtual_funcs = [] self.virtual_funcs = virtual_funcs self.extra_module_dirs = extra_module_dirs if extra_module_dirs else [] self._clean_module_dirs = [] self.disabled = set( self.opts.get( "disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"), [], ) ) # A map of suffix to description for imp self.suffix_map = {} # A list to determine precedence of extensions # Prefer packages (directories) over modules (single files)! self.suffix_order = [""] for (suffix, mode, kind) in SUFFIXES: self.suffix_map[suffix] = (suffix, mode, kind) self.suffix_order.append(suffix) self._lock = threading.RLock() with self._lock: self._refresh_file_mapping() super().__init__() # late init the lazy loader # create all of the import namespaces _generate_module("{}.int".format(self.loaded_base_name)) _generate_module("{}.int.{}".format(self.loaded_base_name, tag)) _generate_module("{}.ext".format(self.loaded_base_name)) _generate_module("{}.ext.{}".format(self.loaded_base_name, tag)) def clean_modules(self): """ Clean modules """ for name in list(sys.modules): if name.startswith(self.loaded_base_name): del sys.modules[name] def __getitem__(self, item): """ Override the __getitem__ in order to decorate the returned function if we need to last-minute inject globals """ func = super().__getitem__(item) return LoadedFunc(func, self) def __getattr__(self, mod_name): """ Allow for "direct" attribute access-- this allows jinja templates to access things like `salt.test.ping()` """ if mod_name in ("__getstate__", "__setstate__"): return object.__getattribute__(self, mod_name) # if we have an attribute named that, lets return it. 
try: return object.__getattr__(self, mod_name) # pylint: disable=no-member except AttributeError: pass # otherwise we assume its jinja template access if mod_name not in self.loaded_modules and not self.loaded: for name in self._iter_files(mod_name): if name in self.loaded_files: continue # if we got what we wanted, we are done if self._load_module(name) and mod_name in self.loaded_modules: break if mod_name in self.loaded_modules: return LoadedMod(self.loaded_modules[mod_name], self) else: raise AttributeError(mod_name) def missing_fun_string(self, function_name): """ Return the error string for a missing function. This can range from "not available' to "__virtual__" returned False """ mod_name = function_name.split(".")[0] if mod_name in self.loaded_modules: return "'{}' is not available.".format(function_name) else: try: reason = self.missing_modules[mod_name] except KeyError: return "'{}' is not available.".format(function_name) else: if reason is not None: return "'{}' __virtual__ returned False: {}".format( mod_name, reason ) else: return "'{}' __virtual__ returned False".format(mod_name) def _refresh_file_mapping(self): """ refresh the mapping of the FS on disk """ # map of suffix to description for imp if ( self.opts.get("cython_enable", True) is True and ".pyx" not in self.suffix_map ): try: global pyximport pyximport = __import__("pyximport") # pylint: disable=import-error pyximport.install() # add to suffix_map so file_mapping will pick it up self.suffix_map[".pyx"] = tuple() if ".pyx" not in self.suffix_order: self.suffix_order.append(".pyx") except ImportError: log.info( "Cython is enabled in the options but not present " "in the system path. Skipping Cython modules." 
) # Allow for zipimport of modules if ( self.opts.get("enable_zip_modules", True) is True and ".zip" not in self.suffix_map ): self.suffix_map[".zip"] = tuple() if ".zip" not in self.suffix_order: self.suffix_order.append(".zip") # allow for module dirs self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY) # create mapping of filename (without suffix) to (path, suffix) # The files are added in order of priority, so order *must* be retained. self.file_mapping = salt.utils.odict.OrderedDict() opt_match = [] def _replace_pre_ext(obj): """ Hack so we can get the optimization level that we replaced (if any) out of the re.sub call below. We use a list here because it is a persistent data structure that we will be able to access after re.sub is called. """ opt_match.append(obj) return "" for mod_dir in self.module_dirs: try: # Make sure we have a sorted listdir in order to have # expectable override results files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__") except OSError: continue # Next mod_dir try: pycache_files = [ os.path.join("__pycache__", x) for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__"))) ] except OSError: pass else: files.extend(pycache_files) for filename in files: try: dirname, basename = os.path.split(filename) if basename.startswith("_"): # skip private modules # log messages omitted for obviousness continue # Next filename f_noext, ext = os.path.splitext(basename) f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext) try: opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1]) except (AttributeError, IndexError, ValueError): # No regex match or no optimization level matched opt_level = 0 try: opt_index = self.opts["optimization_order"].index(opt_level) except KeyError: log.trace( "Disallowed optimization level %d for module " "name '%s', skipping. 
Add %d to the " "'optimization_order' config option if you " "do not want to ignore this optimization " "level.", opt_level, f_noext, opt_level, ) continue # make sure it is a suffix we support if ext not in self.suffix_map: continue # Next filename if f_noext in self.disabled: log.trace( "Skipping %s, it is disabled by configuration", filename ) continue # Next filename fpath = os.path.join(mod_dir, filename) # if its a directory, lets allow us to load that if ext == "": # is there something __init__? subfiles = os.listdir(fpath) for suffix in self.suffix_order: if "" == suffix: continue # Next suffix (__init__ must have a suffix) init_file = "__init__{}".format(suffix) if init_file in subfiles: break else: continue # Next filename try: curr_ext = self.file_mapping[f_noext][1] curr_opt_index = self.file_mapping[f_noext][2] except KeyError: pass else: if "" in (curr_ext, ext) and curr_ext != ext: log.error( "Module/package collision: '%s' and '%s'", fpath, self.file_mapping[f_noext][0], ) if six.PY3 and ext == ".pyc" and curr_ext == ".pyc": # Check the optimization level if opt_index >= curr_opt_index: # Module name match, but a higher-priority # optimization level was already matched, skipping. continue elif not curr_ext or self.suffix_order.index( ext ) >= self.suffix_order.index(curr_ext): # Match found but a higher-priorty match already # exists, so skip this. continue if six.PY3 and not dirname and ext == ".pyc": # On Python 3, we should only load .pyc files from the # __pycache__ subdirectory (i.e. when dirname is not an # empty string). 
continue # Made it this far - add it self.file_mapping[f_noext] = (fpath, ext, opt_index) except OSError: continue for smod in self.static_modules: f_noext = smod.split(".")[-1] self.file_mapping[f_noext] = (smod, ".o", 0) def clear(self): """ Clear the dict """ with self._lock: super().clear() # clear the lazy loader self.loaded_files = set() self.missing_modules = {} self.loaded_modules = {} # if we have been loaded before, lets clear the file mapping since # we obviously want a re-do if hasattr(self, "opts"): self._refresh_file_mapping() self.initial_load = False def __prep_mod_opts(self, opts): """ Strip out of the opts any logger instance """ if "__grains__" not in self.pack: grains = opts.get("grains", {}) if isinstance(grains, salt.loader_context.NamedLoaderContext): grains = grains.value() self.context_dict["grains"] = grains self.pack["__grains__"] = salt.utils.context.NamespacedDictWrapper( self.context_dict, "grains" ) if "__pillar__" not in self.pack: pillar = opts.get("pillar", {}) if isinstance(pillar, salt.loader_context.NamedLoaderContext): pillar = pillar.value() self.context_dict["pillar"] = pillar self.pack["__pillar__"] = salt.utils.context.NamespacedDictWrapper( self.context_dict, "pillar" ) mod_opts = {} for key, val in list(opts.items()): if key == "logger": continue mod_opts[key] = val return mod_opts def _iter_files(self, mod_name): """ Iterate over all file_mapping files in order of closeness to mod_name """ # do we have an exact match? if mod_name in self.file_mapping: yield mod_name # do we have a partial match? for k in self.file_mapping: if mod_name in k: yield k # anyone else? Bueller? 
for k in self.file_mapping: if mod_name not in k: yield k def _reload_submodules(self, mod): submodules = ( getattr(mod, sname) for sname in dir(mod) if isinstance(getattr(mod, sname), mod.__class__) ) # reload only custom "sub"modules for submodule in submodules: # it is a submodule if the name is in a namespace under mod if submodule.__name__.startswith(mod.__name__ + "."): reload_module(submodule) self._reload_submodules(submodule) def __populate_sys_path(self): for directory in self.extra_module_dirs: if directory not in sys.path: sys.path.append(directory) self._clean_module_dirs.append(directory) def __clean_sys_path(self): invalidate_path_importer_cache = False for directory in self._clean_module_dirs: if directory in sys.path: sys.path.remove(directory) invalidate_path_importer_cache = True self._clean_module_dirs = [] # Be sure that sys.path_importer_cache do not contains any # invalid FileFinder references importlib.invalidate_caches() # Because we are mangling with importlib, we can find from # time to time an invalidation issue with # sys.path_importer_cache, that requires the removal of # FileFinder that remain None for the extra_module_dirs if invalidate_path_importer_cache: for directory in self.extra_module_dirs: if ( directory in sys.path_importer_cache and sys.path_importer_cache[directory] is None ): del sys.path_importer_cache[directory] def _load_module(self, name): mod = None fpath, suffix = self.file_mapping[name][:2] # if the fpath has `.cpython-3x` in it, but the running Py version # is 3.y, the following will cause us to return immediately and we won't try to import this .pyc. # This is for the unusual case where several Python versions share a single # source tree and drop their .pycs in the same __pycache__ folder. # If we were to load a .pyc for another Py version it's not a big problem # but the log will get spammed with "Bad Magic Number" messages that # can be very misleading if the user is debugging another problem. 
try: (implementation_tag, cache_tag_ver) = sys.implementation.cache_tag.split( "-" ) if cache_tag_ver not in fpath and implementation_tag in fpath: log.trace( "Trying to load %s on %s, returning False.", fpath, sys.implementation.cache_tag, ) return False except AttributeError: # Most likely Py 2.7 or some other Python version we don't really support pass self.loaded_files.add(name) fpath_dirname = os.path.dirname(fpath) try: self.__populate_sys_path() sys.path.append(fpath_dirname) if suffix == ".pyx": mod = pyximport.load_module(name, fpath, tempfile.gettempdir()) elif suffix == ".o": top_mod = __import__(fpath, globals(), locals(), []) comps = fpath.split(".") if len(comps) < 2: mod = top_mod else: mod = top_mod for subname in comps[1:]: mod = getattr(mod, subname) elif suffix == ".zip": mod = zipimporter(fpath).load_module(name) else: desc = self.suffix_map[suffix] # if it is a directory, we don't open a file try: mod_namespace = ".".join( ( self.loaded_base_name, self.mod_type_check(fpath), self.tag, name, ) ) except TypeError: mod_namespace = "{}.{}.{}.{}".format( self.loaded_base_name, self.mod_type_check(fpath), self.tag, name, ) if suffix == "": # pylint: disable=no-member # Package directory, look for __init__ loader_details = [ ( importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES, ), ( importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES, ), ( importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES, ), ] file_finder = importlib.machinery.FileFinder( fpath_dirname, *loader_details ) spec = file_finder.find_spec(mod_namespace) if spec is None: raise ImportError() # TODO: Get rid of load_module in favor of # exec_module below. load_module is deprecated, but # loading using exec_module has been causing odd things # with the magic dunders we pack into the loaded # modules, most notably with salt-ssh's __opts__. 
mod = spec.loader.load_module() # mod = importlib.util.module_from_spec(spec) # spec.loader.exec_module(mod) # pylint: enable=no-member sys.modules[mod_namespace] = mod # reload all submodules if necessary if not self.initial_load: self._reload_submodules(mod) else: # pylint: disable=no-member loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath) spec = importlib.util.spec_from_file_location( mod_namespace, fpath, loader=loader ) if spec is None: raise ImportError() # TODO: Get rid of load_module in favor of # exec_module below. load_module is deprecated, but # loading using exec_module has been causing odd things # with the magic dunders we pack into the loaded # modules, most notably with salt-ssh's __opts__. mod = self.run(spec.loader.load_module) # mod = importlib.util.module_from_spec(spec) # spec.loader.exec_module(mod) # pylint: enable=no-member sys.modules[mod_namespace] = mod except OSError: raise except ImportError as exc: if "magic number" in str(exc): error_msg = "Failed to import {} {}. Bad magic number. 
If migrating from Python2 to Python3, remove all .pyc files and try again.".format( self.tag, name ) log.warning(error_msg) self.missing_modules[name] = error_msg log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True) self.missing_modules[name] = exc return False except Exception as error: # pylint: disable=broad-except log.error( "Failed to import %s %s, this is due most likely to a " "syntax error:\n", self.tag, name, exc_info=True, ) self.missing_modules[name] = error return False except SystemExit as error: try: fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1] except Exception: # pylint: disable=broad-except pass else: tgt_fns = [ os.path.join("salt", "utils", "process.py"), os.path.join("salt", "cli", "daemons.py"), os.path.join("salt", "cli", "api.py"), ] for tgt_fn in tgt_fns: if fn_.endswith(tgt_fn) and "_handle_signals" in caller: # Race conditon, SIGTERM or SIGINT received while loader # was in process of loading a module. Call sys.exit to # ensure that the process is killed. 
sys.exit(salt.defaults.exitcodes.EX_OK) log.error( "Failed to import %s %s as the module called exit()\n", self.tag, name, exc_info=True, ) self.missing_modules[name] = error return False finally: sys.path.remove(fpath_dirname) self.__clean_sys_path() loader_context = salt.loader_context.LoaderContext() if hasattr(mod, "__salt_loader__"): if not isinstance(mod.__salt_loader__, salt.loader_context.LoaderContext): log.warning("Override __salt_loader__: %s", mod) mod.__salt_loader__ = loader_context else: mod.__salt_loader__ = loader_context if hasattr(mod, "__opts__"): if not isinstance(mod.__opts__, salt.loader_context.NamedLoaderContext): if not hasattr(mod, "__orig_opts__"): mod.__orig_opts__ = copy.deepcopy(mod.__opts__) mod.__opts__ = copy.deepcopy(mod.__orig_opts__) mod.__opts__.update(self.opts) else: if not hasattr(mod, "__orig_opts__"): mod.__orig_opts__ = {} mod.__opts__ = copy.deepcopy(mod.__orig_opts__) mod.__opts__.update(self.opts) # pack whatever other globals we were asked to for p_name, p_value in self.pack.items(): mod_named_context = getattr(mod, p_name, None) if hasattr(mod_named_context, "default"): default = copy.deepcopy(mod_named_context.default) else: default = None named_context = loader_context.named_context(p_name, default) if mod_named_context is None: setattr(mod, p_name, named_context) elif named_context != mod_named_context: log.debug("Override %s: %s", p_name, mod) setattr(mod, p_name, named_context) else: setattr(mod, p_name, named_context) if self.pack_self is not None: mod_named_context = getattr(mod, self.pack_self, None) if hasattr(mod_named_context, "default"): default = copy.deepcopy(mod_named_context.default) else: default = None named_context = loader_context.named_context(self.pack_self, default) if mod_named_context is None: setattr(mod, self.pack_self, named_context) elif named_context != mod_named_context: log.debug("Override %s: %s", self.pack_self, mod) setattr(mod, self.pack_self, named_context) else: setattr(mod, 
self.pack_self, named_context) module_name = mod.__name__.rsplit(".", 1)[-1] # Call a module's initialization method if it exists module_init = getattr(mod, "__init__", None) if inspect.isfunction(module_init): try: self.run(module_init, self.opts) except TypeError as e: log.error(e) except Exception: # pylint: disable=broad-except err_string = "__init__ failed" log.debug( "Error loading %s.%s: %s", self.tag, module_name, err_string, exc_info=True, ) self.missing_modules[module_name] = err_string self.missing_modules[name] = err_string return False # if virtual modules are enabled, we need to look for the # __virtual__() function inside that module and run it. if self.virtual_enable: virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs for virtual_func in virtual_funcs_to_process: ( virtual_ret, module_name, virtual_err, virtual_aliases, ) = self._process_virtual(mod, module_name, virtual_func) if virtual_err is not None: log.trace( "Error loading %s.%s: %s", self.tag, module_name, virtual_err ) # if _process_virtual returned a non-True value then we are # supposed to not process this module if virtual_ret is not True and module_name not in self.missing_modules: # If a module has information about why it could not be loaded, record it self.missing_modules[module_name] = virtual_err self.missing_modules[name] = virtual_err return False else: virtual_aliases = () # If this is a proxy minion then MOST modules cannot work. Therefore, require that # any module that does work with salt-proxy-minion define __proxyenabled__ as a list # containing the names of the proxy types that the module supports. 
# # Render modules and state modules are OK though if "proxy" in self.opts: if self.tag in ["grains", "proxy"]: if not hasattr(mod, "__proxyenabled__") or ( self.opts["proxy"]["proxytype"] not in mod.__proxyenabled__ and "*" not in mod.__proxyenabled__ ): err_string = "not a proxy_minion enabled module" self.missing_modules[module_name] = err_string self.missing_modules[name] = err_string return False if getattr(mod, "__load__", False) is not False: log.info( "The functions from module '%s' are being loaded from the " "provided __load__ attribute", module_name, ) # If we had another module by the same virtual name, we should put any # new functions under the existing dictionary. mod_names = [module_name] + list(virtual_aliases) mod_dict = { x: self.loaded_modules.get(x, self.mod_dict_class()) for x in mod_names } for attr in getattr(mod, "__load__", dir(mod)): if attr.startswith("_"): # private functions are skipped continue func = getattr(mod, attr) if not inspect.isfunction(func) and not isinstance(func, functools.partial): # Not a function!? Skip it!!! continue # Let's get the function name. # If the module has the __func_alias__ attribute, it must be a # dictionary mapping in the form of(key -> value): # <real-func-name> -> <desired-func-name> # # It default's of course to the found callable attribute name # if no alias is defined. 
                funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
                for tgt_mod in mod_names:
                    try:
                        full_funcname = ".".join((tgt_mod, funcname))
                    except TypeError:
                        full_funcname = "{}.{}".format(tgt_mod, funcname)
                    # Save many references for lookups
                    # Careful not to overwrite existing (higher priority) functions
                    if full_funcname not in self._dict:
                        self._dict[full_funcname] = func
                    if funcname not in mod_dict[tgt_mod]:
                        setattr(mod_dict[tgt_mod], funcname, func)
                        mod_dict[tgt_mod][funcname] = func
                        self._apply_outputter(func, mod)

        # enforce depends
        try:
            Depends.enforce_dependencies(self._dict, self.tag, name)
        except RuntimeError as exc:
            log.info(
                "Depends.enforce_dependencies() failed for the following "
                "reason: %s",
                exc,
            )

        for tgt_mod in mod_names:
            self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
        return True

    def _load(self, key):
        """
        Load a single item if you have it

        ``key`` must be a ``"module.function"`` style string; the module part
        is resolved through the file mapping and loaded on demand.  Raises
        ``KeyError`` for malformed keys or whitelist misses.
        """
        # if the key doesn't have a '.' then it isn't valid for this mod dict
        if not isinstance(key, str):
            raise KeyError("The key must be a string.")
        if "." not in key:
            raise KeyError("The key '{}' should contain a '.'".format(key))
        mod_name, _ = key.split(".", 1)
        with self._lock:
            # It is possible that the key is in the dictionary after
            # acquiring the lock due to another thread loading it.
            # NOTE(review): this also returns True when the module is in
            # missing_modules (known-bad); the caller is expected to fail on
            # the subsequent dict lookup — confirm intended.
            if mod_name in self.missing_modules or key in self._dict:
                return True
            # if the modulename isn't in the whitelist, don't bother
            if self.whitelist and mod_name not in self.whitelist:
                log.error(
                    "Failed to load function %s because its module (%s) is "
                    "not in the whitelist: %s",
                    key,
                    mod_name,
                    self.whitelist,
                )
                raise KeyError(key)

            def _inner_load(mod_name):
                # Try every candidate file for this module name until one of
                # them actually provides the requested key.
                for name in self._iter_files(mod_name):
                    if name in self.loaded_files:
                        continue
                    # if we got what we wanted, we are done
                    if self._load_module(name) and key in self._dict:
                        return True
                return False

            # try to load the module
            ret = None
            reloaded = False
            # re-scan up to once, IOErrors or a failed load cause re-scans of the
            # filesystem
            while True:
                try:
                    ret = _inner_load(mod_name)
                    if not reloaded and ret is not True:
                        self._refresh_file_mapping()
                        reloaded = True
                        continue
                    break
                except OSError:
                    if not reloaded:
                        self._refresh_file_mapping()
                        reloaded = True
                    continue
            return ret

    def _load_all(self):
        """
        Load all of them

        Iterates the whole file mapping, skipping files already loaded or
        already recorded as missing, and marks the loader fully loaded.
        """
        with self._lock:
            for name in self.file_mapping:
                if name in self.loaded_files or name in self.missing_modules:
                    continue
                self._load_module(name)
            self.loaded = True

    def reload_modules(self):
        """Forget every loaded file and re-run a full load pass."""
        with self._lock:
            self.loaded_files = set()
            self._load_all()

    def _apply_outputter(self, func, mod):
        """
        Apply the __outputter__ variable to the functions
        """
        if hasattr(mod, "__outputter__"):
            outp = mod.__outputter__
            if func.__name__ in outp:
                func.__outputter__ = outp[func.__name__]

    def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
        """
        Given a loaded module and its default name determine its virtual name

        This function returns a tuple. The first value will be either True or
        False and will indicate if the module should be loaded or not (i.e. if
        it threw and exception while processing its __virtual__ function). The
        second value is the determined virtual name, which may be the same as
        the value provided.

        The default name can be calculated as follows::

            module_name = mod.__name__.rsplit('.', 1)[-1]
        """

        # The __virtual__ function will return either a True or False value.
        # If it returns a True value it can also set a module level attribute
        # named __virtualname__ with the name that the module should be
        # referred to as.
        #
        # This allows us to have things like the pkg module working on all
        # platforms under the name 'pkg'. It also allows for modules like
        # augeas_cfg to be referred to as 'augeas', which would otherwise have
        # namespace collisions. And finally it allows modules to return False
        # if they are not intended to run on the given platform or are missing
        # dependencies.
        virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
        try:
            error_reason = None
            if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
                try:
                    start = time.time()
                    virtual_attr = getattr(mod, virtual_func)
                    # Run inside this loader's context so the module sees the
                    # packed dunders (__opts__ etc.) during __virtual__().
                    virtual = self.run(virtual_attr)
                    if isinstance(virtual, tuple):
                        # (False, "reason") style return
                        error_reason = virtual[1]
                        virtual = virtual[0]
                    if self.opts.get("virtual_timer", False):
                        end = time.time() - start
                        msg = "Virtual function took {} seconds for {}".format(
                            end, module_name
                        )
                        log.warning(msg)
                except Exception as exc:  # pylint: disable=broad-except
                    error_reason = (
                        "Exception raised when processing __virtual__ function"
                        " for {}. Module will not be loaded: {}".format(
                            mod.__name__, exc
                        )
                    )
                    log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
                    virtual = None
                # Get the module's virtual name
                virtualname = getattr(mod, "__virtualname__", virtual)
                if not virtual:
                    # if __virtual__() evaluates to False then the module
                    # wasn't meant for this platform or it's not supposed to
                    # load for some other reason.

                    # Some modules might accidentally return None and are
                    # improperly loaded
                    if virtual is None:
                        log.warning(
                            "%s.__virtual__() is wrongly returning `None`. "
                            "It should either return `True`, `False` or a new "
                            "name. If you're the developer of the module "
                            "'%s', please fix this.",
                            mod.__name__,
                            module_name,
                        )

                    return (False, module_name, error_reason, virtual_aliases)

                # At this point, __virtual__ did not return a
                # boolean value, let's check for deprecated usage
                # or module renames
                if virtual is not True and module_name != virtual:
                    # The module is renaming itself. Updating the module name
                    # with the new name
                    log.trace("Loaded %s as virtual %s", module_name, virtual)

                    if virtualname != virtual:
                        # The __virtualname__ attribute does not match what's
                        # being returned by the __virtual__() function. This
                        # should be considered an error.
                        log.error(
                            "The module '%s' is showing some bad usage. Its "
                            "__virtualname__ attribute is set to '%s' yet the "
                            "__virtual__() function is returning '%s'. These "
                            "values should match!",
                            mod.__name__,
                            virtualname,
                            virtual,
                        )

                    module_name = virtualname

                # If the __virtual__ function returns True and __virtualname__
                # is set then use it
                elif virtual is True and virtualname != module_name:
                    if virtualname is not True:
                        module_name = virtualname

        except KeyError:
            # Key errors come out of the virtual function when passing
            # in incomplete grains sets, these can be safely ignored
            # and logged to debug, still, it includes the traceback to
            # help debugging.
            log.debug("KeyError when loading %s", module_name, exc_info=True)

        except Exception:  # pylint: disable=broad-except
            # If the module throws an exception during __virtual__()
            # then log the information and continue to the next.
            log.error(
                "Failed to read the virtual function for %s: %s",
                self.tag,
                module_name,
                exc_info=True,
            )
            return (False, module_name, error_reason, virtual_aliases)

        return (True, module_name, None, virtual_aliases)

    def run(self, _func_or_method, *args, **kwargs):
        """
        Run the `_func_or_method` in this loader's context

        A fresh copy of the current contextvars context is taken per call and
        kept on `_last_context` for later inspection.
        """
        self._last_context = contextvars.copy_context()
        return self._last_context.run(self._run_as, _func_or_method, *args, **kwargs)

    def _run_as(self, _func_or_method, *args, **kwargs):
        """
        Handle setting up the context properly and call the method

        Records any previously-active loader as `parent_loader`, points the
        loader context variable at `self` for the duration of the call, and
        always restores the previous value on exit.
        """
        self.parent_loader = None
        try:
            current_loader = salt.loader_context.loader_ctxvar.get()
        except LookupError:
            # No loader active yet in this context.
            current_loader = None
        if current_loader is not self:
            self.parent_loader = current_loader
        token = salt.loader_context.loader_ctxvar.set(self)
        try:
            return _func_or_method(*args, **kwargs)
        finally:
            self.parent_loader = None
            salt.loader_context.loader_ctxvar.reset(token)

    def run_in_thread(self, _func_or_method, *args, **kwargs):
        """
        Run the function in a new thread with the context of this loader

        Returns the started ``threading.Thread``; the caller is responsible
        for joining it.
        """
        argslist = [self, _func_or_method]
        argslist.extend(args)
        thread = threading.Thread(target=self.target, args=argslist, kwargs=kwargs)
        thread.start()
        return thread

    @staticmethod
    def target(loader, _func_or_method, *args, **kwargs):
        # Thread entry point: re-enter the loader context inside the new
        # thread (contextvars do not propagate into threads automatically).
        loader.run(_func_or_method, *args, **kwargs)


def global_injector_decorator(inject_globals):
    """
    Decorator used by the LazyLoader to inject globals into a function at
    execute time.

    globals
        Dictionary with global variables to inject
    """

    def inner_decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Globals are only patched in for the duration of each call.
            with salt.utils.context.func_globals_inject(f, **inject_globals):
                return f(*args, **kwargs)

        return wrapper

    return inner_decorator
client_server_test.py
#!/usr/bin/env python3
"""End-to-end tests for the securedrop client/server pair.

Each test boots a real server process (see ``server_process``) and drives the
interactive client by patching ``builtins.input`` and ``getpass.getpass``.
"""

import os
import unittest
from unittest.mock import patch

import securedrop.client as client
from securedrop.client import LIST_CONTACTS_TEST_FILENAME
from securedrop.server import ServerDriver, Server, DEFAULT_filename, AESWrapper

import json
import time
import contextlib

from multiprocessing import shared_memory, Process


class InputSideEffect:
    """Sequential canned responses for ``unittest.mock.patch(side_effect=...)``.

    Each call to ``se`` returns the next element of the supplied list,
    regardless of the arguments the patched callable was invoked with.
    """

    # NOTE(review): class-level default; ``self.i += 1`` rebinds it per
    # instance, so instances do not share state.
    i = 0
    lt: list

    def __init__(self, lst):
        self.lt = lst

    def se(self, *args, **kwargs):
        # Ignore the caller's arguments; just replay the scripted value.
        val = self.lt[self.i]
        self.i += 1
        return val


@contextlib.contextmanager
def server_process():
    """Start the server in a child process; signal it to stop on exit.

    Shutdown is requested by writing 1 into the server's shared-memory
    sentinel, then joining the process.
    """
    with ServerDriver() as driver:
        process = Process(target=driver.run)
        try:
            process.start()
            # Give the server a moment to come up before the client connects.
            time.sleep(1)
            yield process
        finally:
            sentinel = shared_memory.SharedMemory(driver.sentinel_name())
            sentinel.buf[0] = 1
            sentinel.close()
            process.join()


class TestRegistration(unittest.TestCase):
    # Test prefix aaa, aab, etc. is to ensure the tests run in the correct order

    def test_aaa_initial_ask_to_register_no_response_fails(self):
        """Ensures that client throws if the user declines to register a user."""
        with server_process():
            se = InputSideEffect(["n", "exit"])
            with patch('builtins.input', side_effect=se.se):
                with self.assertRaises(RuntimeError):
                    client.main()

    def test_aab_initial_ask_to_register_mismatching_passwords(self):
        """Ensures that client throws if the user inputs mismatching passwords during registration."""
        with server_process():
            se1 = InputSideEffect(["y", "name_v", "email_v@test.com", "exit"])
            se2 = InputSideEffect(["password_v12", "password_v12_"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    with self.assertRaises(RuntimeError):
                        client.main()

    def test_aac_initial_ask_to_register_empty_input(self):
        """Ensures that client throws if the user inputs an empty string during registration."""
        with server_process():
            # Blank out each of the four registration answers in turn
            # (name, email, password, confirmation).
            for i in range(0, 2):
                for j in range(0, 2):
                    se_lists = [["y", "name_v", "email_v@test.com", "exit"], ["password_v12", "password_v12"]]
                    se_lists[i][j + int(i == 0)] = ""
                    se1 = InputSideEffect(se_lists[0])
                    se2 = InputSideEffect(se_lists[1])
                    with patch('builtins.input', side_effect=se1.se):
                        with patch('getpass.getpass', side_effect=se2.se):
                            with self.assertRaises(RuntimeError):
                                client.main()

    def test_aad_initial_ask_to_register_invalid_email(self):
        """Ensures that client throws if the user inputs invalid email during registration."""
        with server_process():
            invalid_emails = [
                "Abc.example.com",
                "A@b@c@example.com",
                "a\"b(c)d,e:f;g<h>i[j\\k]l@example.com",
                "just\"not\"right@example.com",
                "this is\"not\\allowed@example.com",
                "this\\ still\\\"not\\\\allowed@example.com",
                "1234567890123456789012345678901234567890123456789012345678901234+x@example.com",
                "i_like_underscore@but_its_not_allow_in_this_part.example.com"
            ]
            for i in invalid_emails:
                se1 = InputSideEffect(["y", "name_v", i, "exit"])
                se2 = InputSideEffect(["password_v12", "password_v12"])
                with patch('builtins.input', side_effect=se1.se):
                    with self.assertRaises(RuntimeError):
                        with patch('getpass.getpass', side_effect=se2.se):
                            client.main()

    def test_aae_initial_ask_to_register_password_too_short(self):
        """Ensures that client throws if the user inputs a password that is too short during registration."""
        with server_process():
            se1 = InputSideEffect(["y", "name_v", "email_v", "exit"])
            se2 = InputSideEffect(["password_v1", "password_v1"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    with self.assertRaises(RuntimeError):
                        client.main()

    def test_aaf_initial_registration_succeeds(self):
        """Ensures that client doesn't throw during some wild, yet valid user registrations."""
        valid_emails = [
            "simple@example.com",
            "very.common@example.com",
            "x@example.com",
            "user%example.com@example.org",
            "mailhost!username@example.org",
            "user.name+tag+sorting@example.com",
            "fully-qualified-domain@example.com",
            "example-indeed@strange-example.com",
            "other.email-with-hyphen@example.com",
            "EXTREMELYLONGEMAIL12345678901234567890123456789012345678901234+x@example.com"
        ]
        with server_process():
            for i in valid_emails:
                se1 = InputSideEffect(["y", "name_v", i, "exit"])
                se2 = InputSideEffect(["password_v12", "password_v12"])
                with patch('builtins.input', side_effect=se1.se):
                    with patch('getpass.getpass', side_effect=se2.se):
                        client.main()
                # Reset both client-side and server-side state files so each
                # registration starts from scratch.
                os.remove(client.DEFAULT_FILENAME)
                os.remove(DEFAULT_filename)

    def test_aag_initial_registration_succeeds(self):
        """Ensures that client doesn't throw during valid registration."""
        with server_process():
            se1 = InputSideEffect(["y", "name_v", "email_v@test.com", "exit"])
            se2 = InputSideEffect(["password_v12", "password_v12"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    client.main()

    def assert_initial_registered_users_dict_is_valid(self, d):
        # The key is the SHA-256 style hash of the registered email address.
        for email, cd in d.items():
            self.assertEqual(email, "e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7")
            self.assertEqual(cd["email"], email)
            self.assertTrue(cd["name"])
            self.assertTrue(cd["contacts"])
            self.assertTrue(cd["auth"]["salt"])
            self.assertTrue(cd["auth"]["key"])

    def assert_initial_registered_users_is_valid(self, ru):
        for email, cd in ru.items():
            self.assertEqual(email, "e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7")
            self.assertEqual(cd.email_hash, "e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7")
            self.assertTrue(cd.auth.salt)
            self.assertTrue(cd.auth.key)

    def test_aah_initial_json_valid(self):
        """Ensures that client serializes to JSON correctly after registration."""
        with open(DEFAULT_filename, 'r') as f:
            jdict = json.load(f)
            self.assert_initial_registered_users_dict_is_valid(jdict)

    def test_aai_initial_load_from_json(self):
        """Ensures that client deserializes from JSON correctly."""
        serv = Server(DEFAULT_filename)
        self.assert_initial_registered_users_is_valid(serv.users.users)

    def test_aaj_login_unknown_email(self):
        """Ensures that client throws if trying to login with an unknown email."""
        with server_process():
            se1 = InputSideEffect(["email_v_@test.com"])
            se2 = InputSideEffect(["password_v12"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    with self.assertRaises(RuntimeError):
                        client.main()

    def test_aak_login_wrong_password(self):
        """Ensures that client throws if trying to login with an incorrect password."""
        with server_process():
            se1 = InputSideEffect(["email_v@test.com"])
            se2 = InputSideEffect(["password_v12_"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    with self.assertRaises(RuntimeError):
                        client.main()

    def test_aal_login_correct_password(self):
        """Ensures that client logs in successfully with correct email/password."""
        with server_process():
            se1 = InputSideEffect(["email_v@test.com", "exit"])
            se2 = InputSideEffect(["password_v12"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    client.main()

    def test_aam_add_contact_empty_input(self):
        """Ensures that client does not add a new contact if the input is an empty string."""
        with server_process():
            # Blank out the contact name, then the contact email.
            for i in range(0, 2):
                se_list = ["email_v@test.com", "add", "name_v_2", "email_v_2@test.com", "exit"]
                se_list[2 + i] = ""
                se1 = InputSideEffect(se_list)
                se2 = InputSideEffect(["password_v12"])
                with patch('builtins.input', side_effect=se1.se):
                    with patch('getpass.getpass', side_effect=se2.se):
                        client.main()
                with open(DEFAULT_filename, 'r') as f:
                    jdict = json.load(f)
                    contacts = json.loads(
                        AESWrapper("email_v@test.com").decrypt(
                            jdict["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]
                            ["contacts"]))
                    self.assertEqual(dict(), contacts)

    def test_aan_add_contact_invalid_email(self):
        """Ensures that client does not add a new contact if the input is an invalid email."""
        invalid_emails = [
            "Abc.example.com",
            "A@b@c@example.com",
            "a\"b(c)d,e:f;g<h>i[j\\k]l@example.com",
            "just\"not\"right@example.com",
            "this is\"not\\allowed@example.com",
            "this\\ still\\\"not\\\\allowed@example.com",
            "1234567890123456789012345678901234567890123456789012345678901234+x@example.com",
            "i_like_underscore@but_its_not_allow_in_this_part.example.com"
        ]
        with server_process():
            for i in invalid_emails:
                se_list = ["email_v@test.com", "add", "name_v_2", i, "exit"]
                # NOTE(review): redundant — position 3 is already i above.
                se_list[3] = i
                se1 = InputSideEffect(se_list)
                se2 = InputSideEffect(["password_v12"])
                with patch('builtins.input', side_effect=se1.se):
                    with patch('getpass.getpass', side_effect=se2.se):
                        client.main()
                with open(DEFAULT_filename, 'r') as f:
                    jdict = json.load(f)
                    contacts = json.loads(
                        AESWrapper("email_v@test.com").decrypt(
                            jdict["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]
                            ["contacts"]))
                    self.assertEqual(dict(), contacts)

    def test_aao_add_contact(self):
        """Ensures that client adds valid contacts successfully."""
        with server_process():
            se1 = InputSideEffect([
                "email_v@test.com", "add", "name_v_2", "email_v_2@test.com", "add", "name_v_3", "email_v_3@test.com",
                "exit"
            ])
            se2 = InputSideEffect(["password_v12"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    client.main()
            with open(DEFAULT_filename, 'r') as f:
                jdict = json.load(f)
                contacts = json.loads(
                    AESWrapper("email_v@test.com").decrypt(
                        jdict["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]["contacts"]))
                self.assertEqual("name_v_2", contacts["email_v_2@test.com"])
                self.assertEqual("name_v_3", contacts["email_v_3@test.com"])

    def test_aap_list_contacts_empty_dictionary(self):
        """Ensures that does not list users if no users have been added"""
        with server_process():
            se1 = InputSideEffect(["email_v@test.com", "list", "exit"])
            se2 = InputSideEffect(["password_v12"])
            with patch('builtins.input', side_effect=se1.se):
                with patch('getpass.getpass', side_effect=se2.se):
                    client.main(None, None, None, True)
                    with open(LIST_CONTACTS_TEST_FILENAME, 'r') as f:
                        jdict = json.load(f)
                        is_empty = not bool(jdict)
                        self.assertTrue(is_empty)

    def test_aaq_login_correct_password_decrypt_contact(self):
        """Ensures that client logs in successfully with correct email/password Then decrypts contacts."""
        server = Server(DEFAULT_filename)
        user = server.users.users["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]
        user.email = "email_v@test.com"
        self.assertNotEqual(user.enc_contacts, "name_v_3")
        user.decrypt_name_contacts()
        self.assertIsNotNone(user.contacts)
        self.assertEqual(user.contacts["email_v_2@test.com"], "name_v_2")
        self.assertEqual(user.contacts["email_v_3@test.com"], "name_v_3")

    def test_aar_data_in_memory_after_decrypt(self):
        """Ensures that Client data can be accessed in local memory after decryption"""
        server = Server(DEFAULT_filename)
        user = server.users.users["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]
        user.email = "email_v@test.com"
        user.decrypt_name_contacts()
        self.assertEqual(user.name, "name_v")
        self.assertEqual(user.contacts["email_v_2@test.com"], "name_v_2")
        self.assertEqual(user.contacts["email_v_3@test.com"], "name_v_3")

    def test_aas_test_decrypt_wrong_password(self):
        """Ensures that client throws an error when decryption is not successful (wrong key)."""
        server = Server(DEFAULT_filename)
        user = server.users.users["e908de13f0f86b9c15f70d34cc1a5696280b3fbf822ae09343a779b19a3214b7"]
        # Wrong email means the derived AES key will not match.
        user.email = "email_v_@test.com"
        with self.assertRaises(RuntimeError):
            user.decrypt_name_contacts()


if __name__ == '__main__':
    unittest.main()
fsfreezer.py
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
from mounts import Mounts
import datetime
import threading
import os
import time
import sys
import signal
import traceback
import threading  # NOTE(review): duplicate of the import above; kept to avoid churn


def thread_for_binary(self, args):
    """Thread target: start the safefreeze binary after a short delay.

    ``self`` is the owning FreezeHandler (this is a plain function used as a
    thread target, not a bound method); the created Popen object is published
    on ``self.child`` for the signal handlers to poll.
    """
    self.logger.log("Thread for binary is called", True)
    time.sleep(5)
    self.logger.log("Waited in thread for 5 seconds", True)
    self.child = subprocess.Popen(args, stdout=subprocess.PIPE)
    self.logger.log("Binary subprocess Created", True)


class FreezeError(object):
    """Details of a single filesystem freeze failure."""

    def __init__(self):
        self.errorcode = None
        self.fstype = None
        self.path = None

    def __str__(self):
        # BUG FIX: "path" previously lacked its ':' separator, producing
        # output like "... fstype:xfs path/mnt" instead of "path:/mnt".
        return ("errorcode:" + str(self.errorcode) + " fstype:" + str(self.fstype)
                + " path:" + str(self.path))


class FreezeResult(object):
    """Aggregates error messages from a freeze/thaw attempt."""

    def __init__(self):
        self.errors = []

    def __str__(self):
        error_str = ""
        for error in self.errors:
            error_str += (str(error)) + "\n"
        return error_str


class FreezeHandler(object):
    """Drives the safefreeze child process and reacts to its signals."""

    def __init__(self, logger, hutil):
        # sig_handle valid values(0:nothing done,1: freezed successfully, 2:freeze failed)
        self.sig_handle = 0
        self.child = None
        self.logger = logger
        self.hutil = hutil

    def sigusr1_handler(self, signal, frame):
        # SIGUSR1 from the child means all mounts are frozen.
        self.logger.log('freezed', False)
        self.sig_handle = 1

    def sigchld_handler(self, signal, frame):
        # A terminated child before the freeze notification means failure.
        self.logger.log('some child process terminated')
        if(self.child is not None and self.child.poll() is not None):
            self.logger.log("binary child terminated", True)
            self.sig_handle = 2

    def reset_signals(self):
        """Return to the initial 'nothing done' state before a new freeze."""
        self.sig_handle = 0
        self.child = None

    def startproc(self, args):
        """Launch the safefreeze binary and wait for a signal or timeout.

        Polls ``sig_handle`` every 2 seconds for up to the configured wait
        time and returns its final value (0, 1 or 2).
        """
        binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])
        binary_thread.start()
        proc_sleep_time = self.hutil.get_value_from_configfile('SafeFreezeWaitInSeconds')
        if(proc_sleep_time is None or proc_sleep_time == ''):
            proc_sleep_time = 66
        self.logger.log("safe freeze wait time in seconds : " + str(proc_sleep_time))
        # BUG FIX: use floor division — on Python 3, '/' yields a float and
        # range() raises TypeError (this file explicitly supports Python 3).
        for i in range(0, (int(proc_sleep_time) // 2)):
            if(self.sig_handle == 0):
                self.logger.log("inside while with sig_handle " + str(self.sig_handle))
                time.sleep(2)
            else:
                break
        self.logger.log("Binary output for signal handled: " + str(self.sig_handle))
        return self.sig_handle

    def signal_receiver(self):
        """Install the SIGUSR1/SIGCHLD handlers used during a freeze."""
        signal.signal(signal.SIGUSR1, self.sigusr1_handler)
        signal.signal(signal.SIGCHLD, self.sigchld_handler)


class FsFreezer:
    """Freezes and thaws the VM's eligible filesystems via safefreeze."""

    def __init__(self, patching, logger, hutil):
        """Collect the mount table; a failure leaves ``self.mounts`` as None."""
        self.patching = patching
        self.logger = logger
        self.hutil = hutil
        try:
            self.mounts = Mounts(patching=self.patching, logger=self.logger)
        except Exception as e:
            errMsg = 'Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.logger.log(errMsg, True, 'Warning')
            self.logger.log(str(e), True)
            self.mounts = None
        self.frozen_items = set()
        self.unfrozen_items = set()
        self.freeze_handler = FreezeHandler(self.logger, self.hutil)

    def should_skip(self, mount):
        """Return True unless the mount is a freezable (ext3/4, xfs, btrfs, non-loop) filesystem."""
        if((mount.fstype == 'ext3' or mount.fstype == 'ext4' or mount.fstype == 'xfs' or mount.fstype == 'btrfs') and mount.type != 'loop'):
            return False
        else:
            return True

    def freeze_safe(self, timeout):
        """Freeze all eligible mounts via the safefreeze binary.

        '/' is always appended last so the root filesystem is frozen after
        every other mount. Returns a FreezeResult whose ``errors`` list is
        empty on success.
        """
        self.root_seen = False
        error_msg = ''
        try:
            freeze_result = FreezeResult()
            freezebin = os.path.join(os.getcwd(), os.path.dirname(__file__), "safefreeze/bin/safefreeze")
            args = [freezebin, str(timeout)]
            for mount in self.mounts.mounts:
                self.logger.log("fsfreeze mount :" + str(mount.mount_point), True)
                if(mount.mount_point == '/'):
                    self.root_seen = True
                    self.root_mount = mount
                elif(mount.mount_point and not self.should_skip(mount)):
                    args.append(str(mount.mount_point))
            if(self.root_seen):
                args.append('/')
            self.logger.log("arg : " + str(args), True)
            self.freeze_handler.reset_signals()
            self.freeze_handler.signal_receiver()
            self.logger.log("proceeded for accepting signals", True)
            self.logger.enforce_local_flag(False)
            sig_handle = self.freeze_handler.startproc(args)
            if(sig_handle != 1):
                # Freeze did not complete; drain and log the child's output.
                if (self.freeze_handler.child is not None):
                    while True:
                        line = self.freeze_handler.child.stdout.readline()
                        if sys.version_info > (3,):
                            line = str(line, encoding='utf-8', errors="backslashreplace")
                        else:
                            line = str(line)
                        if(line != ''):
                            self.logger.log(line.rstrip(), True)
                        else:
                            break
                error_msg = "freeze failed for some mount"
                freeze_result.errors.append(error_msg)
                self.logger.log(error_msg, True, 'Error')
        except Exception as e:
            self.logger.enforce_local_flag(True)
            error_msg = 'freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
            freeze_result.errors.append(error_msg)
            self.logger.log(error_msg, True, 'Error')
        return freeze_result

    def thaw_safe(self):
        """Signal the safefreeze child to thaw and collect its outcome.

        Returns ``(FreezeResult, unable_to_sleep)``; ``unable_to_sleep`` is
        True when the child reports it could not execute its sleep (rc 2).
        """
        thaw_result = FreezeResult()
        unable_to_sleep = False
        if(self.freeze_handler.child is None):
            # Child never started or already reaped — state is unknown.
            self.logger.log("child already completed", True)
            error_msg = 'snapshot result inconsistent'
            thaw_result.errors.append(error_msg)
        elif(self.freeze_handler.child.poll() is None):
            self.logger.log("child process still running")
            # SIGUSR1 asks safefreeze to thaw; give it up to 30s to exit.
            self.freeze_handler.child.send_signal(signal.SIGUSR1)
            for i in range(0, 30):
                if(self.freeze_handler.child.poll() is None):
                    self.logger.log("child still running sigusr1 sent")
                    time.sleep(1)
                else:
                    break
            self.logger.enforce_local_flag(True)
            self.logger.log("Binary output after process end: ", True)
            while True:
                line = self.freeze_handler.child.stdout.readline()
                if sys.version_info > (3,):
                    line = str(line, encoding='utf-8', errors="backslashreplace")
                else:
                    line = str(line)
                if(line != ''):
                    self.logger.log(line.rstrip(), True)
                else:
                    break
            if(self.freeze_handler.child.returncode != 0):
                error_msg = 'snapshot result inconsistent as child returns with failure'
                thaw_result.errors.append(error_msg)
                self.logger.log(error_msg, True, 'Error')
        else:
            # Child exited before we asked it to thaw.
            self.logger.log("Binary output after process end when no thaw sent: ", True)
            if(self.freeze_handler.child.returncode == 2):
                error_msg = 'Unable to execute sleep'
                thaw_result.errors.append(error_msg)
                unable_to_sleep = True
            else:
                error_msg = 'snapshot result inconsistent'
                thaw_result.errors.append(error_msg)
            self.logger.enforce_local_flag(True)
            while True:
                line = self.freeze_handler.child.stdout.readline()
                if sys.version_info > (3,):
                    line = str(line, encoding='utf-8', errors="backslashreplace")
                else:
                    line = str(line)
                if(line != ''):
                    self.logger.log(line.rstrip(), True)
                else:
                    break
            self.logger.log(error_msg, True, 'Error')
        self.logger.enforce_local_flag(True)
        return thaw_result, unable_to_sleep
server.py
import os
import json
import hmac
import binascii
import threading
import time
import itertools
import smtplib
from email.mime.text import MIMEText
from http.server import HTTPServer, BaseHTTPRequestHandler

# TODO: Tests, Logging, Configuration, Packaging, Deployment


class Handler(BaseHTTPRequestHandler):
    """HTTP handler: a GET whose path is the current one-time auth token
    records the caller's IP address; any other request gets a 403."""

    # charset used for the JSON bodies written to the socket
    encoding = 'ascii'

    def do_GET(self):
        # The request path (minus the leading '/') is treated as the token.
        if self.authenticate_request():
            ip = self.client_address[0]
            self.server.updater.update_ip(ip)
            self.success(ip)
        else:
            self.forbidden()

    def success(self, ip):
        """Reply 200 with a JSON confirmation containing the recorded IP."""
        self.send_response(200)
        self.end_headers()
        self.json_response({
            'status': 'success',
            'message': 'IP address successfully updated',
            'ip': ip,
        })

    def forbidden(self):
        """Reply 403 when the token does not match."""
        self.send_response(403)
        self.end_headers()
        self.json_response({
            'status': 'forbidden',
            'message': 'Invalid authentication token',
        })

    def json_response(self, obj):
        # Headers must already have been sent before this is called.
        self.wfile.write(json.dumps(obj).encode(self.encoding))

    def authenticate_request(self):
        # Strip the leading '/' and compare the rest against the stored token.
        return self.server.updater.check_token(self.path[1:])


class Updater:
    """File-backed state machine: emails a one-time link when an IP update is
    needed, and records the caller's IP (consuming the token) when the link
    is visited."""

    def __init__(self, config):
        # Paths and endpoint come from an INI-style config
        # (configparser-compatible .get/.getint interface).
        self.ip_filepath = config.get('checker', 'ip_file')
        self.token_filepath = config.get('checker', 'token_file')
        self.endpoint = config.get('server', 'public_endpoint')
        self.config = config

    def update_ip(self, ip):
        """Persist the new IP and consume (delete) any outstanding token."""
        with open(self.ip_filepath, 'wb') as fh:
            fh.write(ip.encode('ascii'))
        if os.path.exists(self.token_filepath):
            os.remove(self.token_filepath)

    def is_update_needed(self):
        # Update is needed only when neither the IP file nor a pending token
        # file exists (a pending token means an email is already outstanding).
        return not (os.path.exists(self.ip_filepath) or
                    os.path.exists(self.token_filepath))

    def trigger_update(self):
        """Generate a fresh hex token, persist it, and email the update link."""
        auth_token = binascii.b2a_hex(os.urandom(32))
        with open(self.token_filepath, 'wb') as fh:
            fh.write(auth_token)
        self.sendmail(auth_token)

    def sendmail(self, token):
        """Send the one-time update link (containing *token*, a bytes hex
        string) via SMTP over SSL using credentials from the config."""
        addr = 'http://{}/{}'.format(self.endpoint, token.decode('ascii'))
        msg = MIMEText(
            'Click on the following link when you are connected to your home\n'
            'network to update your IP address:\n'
            '\n'
            '' + addr + '\n'
        )
        msg['Subject'] = 'Please update your IP address'
        msg['From'] = self.config.get('email', 'from_email')
        msg['To'] = self.config.get('email', 'to_email')
        print('sending email')
        s = smtplib.SMTP_SSL(self.config.get('email', 'smtp_server'))
        s.login(
            self.config.get('email', 'smtp_user'),
            self.config.get('email', 'smtp_password')
        )
        s.send_message(msg)
        s.quit()
        print('done')

    def check(self):
        # One poll cycle: send a new token when the state files say we must.
        if self.is_update_needed():
            self.trigger_update()

    def check_token(self, request_token):
        """Timing-safe comparison of *request_token* against the stored one.

        Returns False when no token is currently outstanding.
        """
        if not os.path.exists(self.token_filepath):
            return False
        with open(self.token_filepath, 'rb') as fh:
            token = fh.read().decode('ascii')
        return hmac.compare_digest(token, request_token)

    def start_checking(self, check_interval):
        """Run self.check() every *check_interval* seconds in a background
        thread; the thread polls the stop flag once per second."""
        def check():
            for i in itertools.cycle(range(check_interval)):
                if self.stop_requested:
                    break
                if not i:
                    # i == 0 once per check_interval seconds
                    print('Checking')
                    self.check()
                time.sleep(1)
        self.stop_requested = False
        self.checker = threading.Thread(target=check)
        self.checker.start()

    def stop_checking(self):
        """Ask the checker thread to stop and wait (at most ~1s of polling
        latency) for it to finish."""
        self.stop_requested = True
        try:
            self.checker.join()
        except KeyboardInterrupt:
            print('Forcing exit!')


def runserver(config):
    """Start the HTTP server plus the background checker; blocks until
    interrupted, then shuts the checker down."""
    address = (
        config.get('server', 'interface'),
        config.getint('server', 'port')
    )
    httpd = HTTPServer(address, Handler)
    httpd.updater = Updater(config)
    httpd.updater.start_checking(config.getint('checker', 'interval'))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('Quitting...')
        httpd.updater.stop_checking()
threading_barrier_abort.py
# Copyright (c) 2016 Doug Hellmann.  All rights reserved.
"""Demonstrate aborting a threading.Barrier while workers are blocked on it."""
# end_pymotw_header
import threading
import time


def worker(barrier):
    """Wait on the barrier, then report whether it released or was aborted."""
    name = threading.current_thread().name
    print(
        name,
        "waiting for barrier with {} others".format(barrier.n_waiting),
    )
    try:
        worker_id = barrier.wait()
    except threading.BrokenBarrierError:
        # abort() broke the barrier before all parties arrived
        print(name, "aborting")
    else:
        print(name, "after barrier", worker_id)


NUM_THREADS = 3

# The barrier expects one party more than we ever start, so every worker
# blocks in wait() until abort() breaks the barrier.
barrier = threading.Barrier(NUM_THREADS + 1)

threads = []
for i in range(NUM_THREADS):
    threads.append(
        threading.Thread(name="worker-%s" % i, target=worker, args=(barrier,))
    )

for t in threads:
    print(t.name, "starting")
    t.start()
    time.sleep(0.1)

# Break the barrier: every waiting worker sees BrokenBarrierError.
barrier.abort()

for t in threads:
    t.join()
__init__.py
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.

# NOTE(review): this module is Python 2 code (print statements, .iteritems()).

import pxpcap as pcapc
from pox.lib.addresses import IPAddr, EthAddr
from threading import Thread
import pox.lib.packet as pkt
import copy

class PCap (object):
  """Object wrapper around the pxpcap C extension for a single capture
  device: open/filter/capture-in-a-thread/inject."""

  @staticmethod
  def get_devices ():
    """Return a dict mapping device name to {'desc':..., 'addrs': {...}}
    for every device libpcap can enumerate."""
    def ip (addr):
      # pcap hands addresses over in network byte order
      if addr is None: return None
      return IPAddr(addr, networkOrder=True)
    def link (addr):
      # only 6-byte (Ethernet-style) link addresses are kept
      if addr is None: return None
      if len(addr) != 6: return None
      return EthAddr(addr)
    devs = pcapc.findalldevs()
    out = {}
    for d in devs:
      addrs = {}
      n = {'desc':d[1],'addrs':addrs}
      out[d[0]] = n
      for a in d[2]:
        if a[0] == 'AF_INET':
          na = {}
          addrs[a[0]] = na
          na['addr'] = ip(a[1])
          na['netmask'] = ip(a[2])
          na['broadaddr'] = ip(a[3])
          na['dstaddr'] = ip(a[4])
        elif a[0] == 'AF_LINK':
          na = {}
          addrs[a[0]] = na
          na['addr'] = link(a[1])
          na['netmask'] = link(a[2])
          na['broadaddr'] = link(a[3])
          na['dstaddr'] = link(a[4])
        elif a[0] == 'AF_PACKET':
          addrs[a[0]] = {'addr':link(a[1])}
        elif a[0] == 'ethernet':
          addrs[a[0]] = {'addr':link(a[1])}
    return out

  @staticmethod
  def get_device_names ():
    # Just the names, in libpcap's enumeration order.
    return [d[0] for d in pcapc.findalldevs()]

  def __init__ (self, device = None, promiscuous = True, period = 10,
                start = True, callback = None, filter = None):
    """Create a capture object.

    device:      device name to open immediately (None = open later)
    promiscuous: capture in promiscuous mode
    period:      pcap read timeout in milliseconds
    start:       start the capture thread as soon as the device is open
    callback:    rx callback(obj, data, sec, usec, length); defaults to the
                 no-op _handle_rx
    filter:      BPF filter (string or Filter) applied once the device opens
    """
    if filter is not None:
      # can't compile a filter before the device is open; stash it
      self.deferred_filter = (filter,)
    else:
      self.deferred_filter = None
    self.packets_received = 0
    self.packets_dropped = 0
    self._thread = None
    self.pcap = None
    self.promiscuous = promiscuous
    self.device = None
    self.period = period
    self.netmask = IPAddr("0.0.0.0")
    self._quitting = False
    self.addresses = {}
    if callback is None:
      self.callback = self.__class__._handle_rx
    else:
      self.callback = callback
    if device is not None:
      self.open(device)
    if self.pcap is not None:
      if start:
        self.start()

  def _handle_rx (self, data, sec, usec, length):
    # Default packet callback: discard everything.
    pass

  def open (self, device, promiscuous = None, period = None,
            incoming = True, outgoing = False):
    """Open *device* for live capture; may only be called once per object."""
    assert self.device is None
    self.addresses = self.get_devices()[device]['addrs']
    if 'AF_INET' in self.addresses:
      self.netmask = self.addresses['AF_INET'].get('netmask')
      if self.netmask is None: self.netmask = IPAddr("0.0.0.0")
    #print "NM:",self.netmask
    #print self.addresses['AF_LINK']['addr']
    self.device = device
    if period is not None:
      self.period = period
    if promiscuous is not None:
      self.promiscuous = promiscuous
    self.pcap = pcapc.open_live(device, 65535,
                                1 if self.promiscuous else 0, self.period)
    pcapc.setdirection(self.pcap, incoming, outgoing)
    self.packets_received = 0
    self.packets_dropped = 0
    if self.deferred_filter is not None:
      # apply a filter that was requested before the device was open
      self.set_filter(*self.deferred_filter)
      self.deferred_filter = None

  def set_direction (self, incoming, outgoing):
    # Select which traffic directions the capture sees.
    pcapc.setdirection(self.pcap, incoming, outgoing)

  def _thread_func (self):
    # Capture loop: dispatch() delivers up to 100 packets per call to the
    # registered callback; stats are refreshed after each batch.
    while not self._quitting:
      pcapc.dispatch(self.pcap,100,self.callback,self)
      self.packets_received,self.packets_dropped = pcapc.stats(self.pcap)
    self._quitting = False
    self._thread = None

  def _handle_GoingDownEvent (self, event):
    # POX core shutdown hook (registered via core.addListeners in start()).
    self.close()

  def start (self):
    """Start the background capture thread (once per object)."""
    assert self._thread is None
    from pox.core import core
    core.addListeners(self, weak=True)
    self._thread = Thread(target=self._thread_func)
    #self._thread.daemon = True
    self._thread.start()

  def stop (self):
    """Stop the capture thread and wait for it to exit."""
    t = self._thread
    if t is not None:
      self._quitting = True
      pcapc.breakloop(self.pcap)
      t.join()

  def close (self):
    """Stop capturing and release the underlying pcap handle (idempotent)."""
    if self.pcap is None: return
    self.stop()
    pcapc.close(self.pcap)
    self.pcap = None

  def __del__ (self):
    self.close()

  def inject (self, data):
    """Transmit *data* (an ethernet packet object or raw bytes) on the
    device; returns the number of bytes written."""
    if isinstance(data, pkt.ethernet):
      data = data.pack()
    if not isinstance(data, bytes):
      data = bytes(data) # Give it a try...
    return pcapc.inject(self.pcap, data)

  def set_filter (self, filter, optimize = True):
    """Attach a BPF filter (string or Filter); deferred if not yet open."""
    if self.pcap is None:
      self.deferred_filter = (filter, optimize)
      return
    if isinstance(filter, str):
      filter = Filter(filter, optimize, self.netmask.toSignedN(),
                      pcap_obj=self)
    elif isinstance(filter, Filter):
      pass
    else:
      raise RuntimeError("Filter must be string or Filter object")
    pcapc.setfilter(self.pcap, filter._pprogram)


class Filter (object):
  """A compiled BPF filter program.

  Compiles against an open PCap/pcap handle when given, otherwise against a
  temporary dead handle with the given link_type/snaplen.
  """

  def __init__ (self, filter, optimize = True, netmask = None,
                pcap_obj = None, link_type = 1, snaplen = 65535):
    self._pprogram = None
    if netmask is None:
      netmask = 0
    elif isinstance(netmask, IPAddr):
      netmask = netmask.toSignedN()
    delpc = False
    if pcap_obj is None:
      # no live handle supplied: compile against a throwaway dead handle
      delpc = True
      pcap_obj = pcapc.open_dead(link_type, snaplen)
    if isinstance(pcap_obj, PCap):
      pcap_obj = pcap_obj.pcap
    self._pprogram = pcapc.compile(pcap_obj, filter,
                                   1 if optimize else 0, netmask)
    if delpc:
      pcapc.close(pcap_obj)

  def __del__ (self):
    if self._pprogram:
      pcapc.freecode(self._pprogram)


# Map numeric DLT_* link-layer type constants back to their names.
_link_type_names = {}
for k,v in copy.copy(pcapc.__dict__).iteritems():
  if k.startswith("DLT_"):
    _link_type_names[v] = k

def get_link_type_name (dlt):
  # Human-readable name for a DLT constant, or a placeholder if unknown.
  return _link_type_names.get(dlt, "<Unknown " + str(dlt) + ">")


def launch (interface = "en1"):
  """
  Test function
  """
  # Interactive smoke test: sniff ICMP on *interface*, print stats and
  # parsed IPv4 headers, and drop into a REPL with a ping() helper.
  global drop,total,bytes_got,bytes_real,bytes_diff
  drop = 0
  total = 0
  bytes_got = 0
  bytes_real = 0
  bytes_diff = 0

  def cb (obj, data, sec, usec, length):
    # rx callback: track capture/drop stats and print IPv4 packets
    global drop,total,bytes_got,bytes_real,bytes_diff
    #print ">>>",data
    t,d = pcapc.stats(obj.pcap)
    bytes_got += len(data)
    bytes_real += length
    nbd = bytes_real - bytes_got
    if nbd != bytes_diff:
      bytes_diff = nbd
      print "lost bytes:",nbd
    if t > total:
      total = t + 500
      print t,"total"
    if d > drop:
      drop = d
      print d, "dropped"
    p = pkt.ethernet(data)
    ip = p.find('ipv4')
    if ip:
      print ip.srcip,"\t",ip.dstip, p

  print "\n".join(["%i. %s" % x for x in enumerate(PCap.get_device_names())])

  # "#N" selects the Nth enumerated device instead of naming one
  if interface.startswith("#"):
    interface = int(interface[1:])
    interface = PCap.get_device_names()[interface]

  print "Interface:",interface

  p = PCap(interface, callback = cb, filter = "icmp")#[icmptype] != icmp-echoreply")
                          #filter = "ip host 74.125.224.148")

  def ping (eth='00:18:02:6e:ce:55', ip='192.168.0.1'):
    # Build and inject a single ICMP echo request by hand.
    e = pkt.ethernet()
    e.src = p.addresses['ethernet']['addr']
    e.dst = EthAddr(eth)
    e.type = e.IP_TYPE
    ipp = pkt.ipv4()
    ipp.protocol = ipp.ICMP_PROTOCOL
    ipp.srcip = p.addresses['AF_INET']['addr']
    ipp.dstip = IPAddr(ip)
    icmp = pkt.icmp()
    icmp.type = pkt.ICMP.TYPE_ECHO_REQUEST
    icmp.payload = "PingPing" * 6
    ipp.payload = icmp
    e.payload = ipp
    p.inject(e)

  import code
  code.interact(local=locals())
output_processor.py
#          Copyright Jamie Allsop 2011-2019
# Distributed under the Boost Software License, Version 1.0.
#    (See accompanying file LICENSE_1_0.txt or copy at
#          http://www.boost.org/LICENSE_1_0.txt)

#-------------------------------------------------------------------------------
#   Output Processor
#-------------------------------------------------------------------------------

import subprocess
import sys
import os
import re
import time
import threading
import shlex
import colorama
import platform
import logging

import cuppa.timer
from cuppa.colourise import as_colour, as_emphasised, as_highlighted, as_notice
from cuppa.log import logger
from cuppa.utility.python2to3 import as_str, errno, Queue


def command_available( command ):
    # Probe for an executable by attempting to run it with output discarded;
    # only a missing binary (ENOENT) means "not available".
    try:
        with open(os.devnull) as devnull:
            subprocess.Popen( shlex.split( command ), stdout=devnull, stderr=devnull ).communicate()
    except OSError as e:
        if e.errno == errno.ENOENT:
            return False
    return True


class AutoFlushFile:
    """File-like wrapper that flushes after every write (keeps incremental
    subprocess output appearing promptly)."""

    def __init__( self, f ):
        self.f = f

    def flush( self ):
        self.f.flush()

    def write( self, x ):
        self.f.write(x)
        self.f.flush()


class LineConsumer:
    """Callable that drains a readline-style source line by line, passing
    each non-empty line through an optional processor before printing."""

    def __init__( self, call_readline, processor=None ):
        self.call_readline = call_readline
        self.processor = processor

    def __call__( self ):
        for line in iter( self.call_readline, "" ):
            line = as_str( line.rstrip() )
            if line:
                if self.processor:
                    # processor may suppress the line by returning falsy
                    line = self.processor( line )
                    if line:
                        print( line )
                else:
                    print( line )


class IncrementalSubProcess:
    """Run a subprocess while streaming (and optionally transforming) its
    stdout/stderr incrementally rather than buffering to the end."""

    @classmethod
    def Popen2( cls, stdout_processor, stderr_processor, args_list, **kwargs ):
        """Run *args_list*, feeding stdout/stderr lines through the given
        processors; returns the process's exit code. Recognizes the extra
        kwargs 'suppress_output' and 'scons_env' and strips them before
        delegating to subprocess.Popen."""
        kwargs['stdout'] = subprocess.PIPE
        kwargs['stderr'] = subprocess.PIPE

        timing_enabled = logger.isEnabledFor( logging.DEBUG )

        suppress_output = False
        if 'suppress_output' in kwargs:
            suppress_output = kwargs['suppress_output']
            del kwargs['suppress_output']

        use_shell = False
        if 'scons_env' in kwargs:
            use_shell = kwargs['scons_env'].get_option( 'use-shell' )
            del kwargs['scons_env']

        orig_stdout = sys.stdout
        orig_stderr = sys.stderr

        try:
            # TODO: Review this as it might be needed for Windows otherwise replace
            # the wrapped values with orig_stdout and orig_stderr respectively
            sys.stdout = AutoFlushFile( colorama.initialise.wrapped_stdout )
            sys.stderr = AutoFlushFile( colorama.initialise.wrapped_stderr )

            process = None
            stderr_thread = None

            timer = timing_enabled and cuppa.timer.Timer() or None
            if timer:
                logger.debug( "Command [{}] - Running...".format( as_notice(str(timer.timer_id())) ) )

            # close_fds must be False on Windows (pipes need inheriting)
            close_fds = platform.system() == "Windows" and False or True

            if not suppress_output:
                sys.stdout.write( " ".join(args_list) + "\n" )

            process = subprocess.Popen(
                use_shell and " ".join(args_list) or args_list,
                **dict( kwargs, close_fds=close_fds, shell=use_shell, universal_newlines=True )
            )

            stderr_consumer = LineConsumer( process.stderr.readline, stderr_processor )
            stdout_consumer = LineConsumer( process.stdout.readline, stdout_processor )

            # stderr drains on its own thread while stdout drains here
            stderr_thread = threading.Thread( target=stderr_consumer )
            stderr_thread.start()
            stdout_consumer();
            stderr_thread.join()

            process.wait()

            if timer:
                timer.stop()
                logger.debug( "Command [{}] - Elapsed {}".format( as_notice(str(timer.timer_id())), cuppa.timer.as_string( timer.elapsed() ) ) )

            return process.returncode

        except Exception as e:
            if timer:
                timer.stop()
                logger.debug( "Command [{}] - Elapsed {}".format( as_notice(str(timer.timer_id())), cuppa.timer.as_string( timer.elapsed() ) ) )
            logger.error( "IncrementalSubProcess.Popen2() failed with error [{}]".format( str(e) ) )
            if process:
                logger.info( "Killing existing POpen object" )
                process.kill()
            if stderr_thread:
                logger.info( "Joining any running threads" )
                stderr_thread.join()
            raise e
        finally:
            # always restore the real stdout/stderr
            sys.stdout = orig_stdout
            sys.stderr = orig_stderr

    @classmethod
    def Popen( cls, processor, args_list, **kwargs ):
        # Convenience: same processor for both stdout and stderr.
        return cls.Popen2( processor, processor, args_list, **kwargs )


class PSpawn(object):
    """Callable wrapper around SCons' PSPAWN so it can run on a worker
    thread; captures any exception for re-raising from returncode()."""

    def __init__( self, pspawn, sh, escape, cmd, args, env, stdout, stderr ):
        self._pspawn = pspawn
        self._sh = sh
        self._escape = escape
        self._cmd = cmd
        self._args = args
        self._env = env
        self._stdout = stdout
        self._stderr = stderr
        self._exception = None

    def __call__( self ):
        try:
            self._returncode = self._pspawn( self._sh, self._escape, self._cmd, self._args, self._env, self._stdout, self._stderr )
        except BaseException:
            # stash for the owning thread; sys.exc_info() tuple, not the
            # exception object itself
            self._exception = sys.exc_info()

    def returncode( self ):
        if self._exception != None:
            logger.error("pspawn terminated with exception [{}]".format( str(self._exception) ) )
            # NOTE(review): self._exception is a sys.exc_info() tuple;
            # `raise <tuple>` is a TypeError on Python 3 -- looks like this
            # should re-raise self._exception[1] instead. Confirm intent.
            raise self._exception
        return self._returncode


class Stream(object):
    """Thread-safe sink that SCons writes to; lines are queued and printed
    (via the processor) by a Reader on another thread."""

    def __init__( self, processor, name ):
        self._queue = Queue.Queue()
        self._processor = processor
        self._name = name

    def flush( self ):
        pass

    def write( self, text ):
        logger.trace( "Stream _queue.put [{}]".format( self._name ) )
        self._queue.put( text )

    def read( self, block ):
        """Pull one chunk off the queue (optionally blocking) and print each
        of its lines after running it through the processor."""
        try:
            logger.trace( "Stream _queue.get [{}]".format( self._name ) )
            text = self._queue.get( block )
            if text:
                for line in text.splitlines():
                    if self._processor:
                        line = self._processor( line )
                        if line:
                            print( line )
                    else:
                        print( line )
            self._queue.task_done()
        except Queue.Empty:
            logger.trace( "Stream Queue.Empty raised [{}]".format( self._name ) )

    def join( self ):
        # Ensure the consumer wakes at least once, then wait until every
        # queued chunk has been task_done()'d.
        if self._queue.empty():
            logger.trace( "Stream _queue.empty() - flush with None [{}]".format( self._name ) )
            self._queue.put( None )
        self._queue.join()


class Reader(object):
    """Thread target that drains a Stream until *finished* is set, then does
    one final non-blocking read to flush stragglers."""

    def __init__( self, stream, finished ):
        self._stream = stream
        self._finished = finished

    def __call__( self ):
        while not self._finished.is_set():
            self._stream.read(True)
        self._stream.read(False)


class Processor:
    """Installs itself as the SCons SPAWN function so all build commands run
    through the toolchain-aware output processing."""

    def __init__( self, scons_env ):
        self.scons_env = scons_env

    @classmethod
    def install( cls, env ):
        # Keep SCons' platform PSPAWN for the Windows path.
        global _pspawn
        _pspawn = env['PSPAWN']
        output_processor = cls( env )
        if platform.system() == "Windows":
            env['SPAWN'] = output_processor.windows_spawn
        else:
            env['SPAWN'] = output_processor.posix_spawn

    def posix_spawn( self, sh, escape, cmd, args, env ):
        """SPAWN implementation for POSIX: run via IncrementalSubProcess."""
        processor = SpawnedProcessor( self.scons_env )
        returncode = IncrementalSubProcess.Popen(
                processor,
                [ arg.strip('"') for arg in args ],
                env=env,
                suppress_output=True,
        )
        summary = processor.summary( returncode )
        if summary:
            print( summary )
        return returncode

    def windows_spawn( self, sh, escape, cmd, args, env ):
        """SPAWN implementation for Windows: run PSPAWN on a worker thread
        and pump its stdout/stderr Streams through Reader threads."""
        processor = SpawnedProcessor( self.scons_env )
        stdout = Stream( processor, "stdout" )
        stderr = Stream( processor, "stderr" )
        pspawn = PSpawn( _pspawn, sh, escape, cmd, args, env, stdout, stderr )
        pspawn_thread = threading.Thread( target=pspawn )
        finished = threading.Event()
        pspawn_thread.start()
        stdout_thread = threading.Thread( target = Reader( stdout, finished ) )
        stdout_thread.start()
        stderr_thread = threading.Thread( target = Reader( stderr, finished ) )
        stderr_thread.start()
        pspawn_thread.join()
        logger.trace( "Processor - PSPAWN joined" )
        finished.set()
        # drain and join streams before joining their reader threads
        stdout.join()
        logger.trace( "Processor - STDOUT stream joined" )
        stdout_thread.join()
        logger.trace( "Processor - STDOUT thread joined" )
        stderr.join()
        logger.trace( "Processor - STDERR stream joined" )
        stderr_thread.join()
        logger.trace( "Processor - STDERR thread joined" )
        returncode = pspawn.returncode()
        summary = processor.summary( returncode )
        if summary:
            print( summary )
        return returncode


class SpawnedProcessor(object):
    """Adapter binding a ToolchainProcessor to the SCons environment's
    toolchain and output options."""

    def __init__( self, scons_env ):
        self._processor = ToolchainProcessor(
                scons_env['toolchain'],
                scons_env['minimal_output'],
                scons_env['ignore_duplicates'] )

    def __call__( self, line ):
        return self._processor( line )

    def summary( self, returncode ):
        return self._processor.summary( returncode )


class ToolchainProcessor:
    """Classifies compiler output lines using the toolchain's regex
    interpretors, colourises/filters them, and counts errors/warnings."""

    def __init__( self, toolchain, minimal_output, ignore_duplicates ):
        self.toolchain = toolchain
        self.minimal_output = minimal_output
        self.ignore_duplicates = ignore_duplicates
        self.errors = 0
        self.warnings = 0
        self.start_time = time.time()
        self.error_messages = {}
        self.warning_messages = {}
        self.ignore_current_message = False

    def filtered_duplicate( self, line, existing_messages ):
        # Suppress a line already seen (when ignore_duplicates is on); keeps
        # a count of occurrences either way.
        if self.ignore_duplicates and line in existing_messages:
            existing_messages[line] +=1
            self.ignore_current_message = True
            return None
        else:
            self.ignore_current_message = False
            existing_messages[line] = 1
            return line

    def filtered_line( self, line=None, meaning=None ):
        # Route errors/warnings through duplicate filtering; other lines are
        # dropped under minimal output or while a duplicate is being ignored.
        if meaning == "error":
            return self.filtered_duplicate( line, self.error_messages )
        if meaning == "warning":
            return self.filtered_duplicate( line, self.warning_messages )
        if self.minimal_output or self.ignore_current_message:
            return None
        else:
            return line

    def __call__( self, line ):
        """Process one output line: colourise matched error/warning lines and
        prefix them with a numbered banner; return None to suppress."""
        ( matches, interpretor, error_id, warning_id ) = self.interpret( line )
        if matches:
            highlights = interpretor['highlight']
            display = interpretor['display']
            meaning = interpretor['meaning']
            file = interpretor['file']
            message = ''
            for match in display:
                element = matches.group( match )
                if match == file and ( meaning == 'error' or meaning == 'warning' ):
                    # normalise and colour the file-path element only
                    element = self.normalise_path( element )
                    element = as_colour( meaning, element )
                if match in highlights:
                    element = as_emphasised( element )
                message += element

            message = self.filtered_line( message + "\n", meaning )
            if meaning == 'error':
                if message:
                    message = as_highlighted( meaning, " = Error " + str(error_id) + " = ") + "\n" + message
                else:
                    # duplicate was suppressed: undo the count from interpret()
                    self.errors -= 1
            elif meaning == 'warning':
                if message:
                    message = as_highlighted( meaning, " = Warning " + str(warning_id) + " = ") + "\n" + message
                else:
                    self.warnings -= 1
            return message
        return self.filtered_line( line )

    def normalise_path( self, file_path ):
        # Prefer a path relative to the current directory when it exists.
        normalised_path = file_path
        if os.path.exists( file_path ):
            normalised_path = os.path.relpath( os.path.realpath( file_path ) )
#            if normalised_path[0] != '.' and normalised_path[0] != os.path.sep:
#                normalised_path = '.' + os.path.sep + normalised_path
#            return os.path.abspath( normalised_path )
        return normalised_path

    def interpret( self, line ):
        """Match *line* against the toolchain's output interpretors; counts a
        new error/warning on match. Returns (matches, interpretor, error_id,
        warning_id) or all-None when nothing matches."""
        Interpretors = self.toolchain.output_interpretors()
        for interpretor in Interpretors:
            Regex = interpretor['regex']
            Matches = re.match( Regex, line )
            if Matches:
                error_id = 0
                warning_id = 0
                if interpretor['meaning'] == 'error':
                    self.errors += 1
                    error_id = self.errors
                elif interpretor['meaning'] == 'warning':
                    self.warnings += 1
                    warning_id = self.warnings
                return ( Matches, interpretor, error_id, warning_id, )
        return ( None, None, None, None, )

    def summary( self, returncode ):
        """Build the end-of-command banner: exit status (if non-zero) plus
        error and warning totals."""
        elapsed_time = time.time() - self.start_time
        Summary = ''
        if returncode:
            Summary += as_highlighted( 'summary', " === Process Terminated with status " + str(returncode) + " (Elapsed " + str(elapsed_time) + "s)" + " === ") + "\n"
        if self.errors:
            Summary += as_highlighted( 'error', " === Errors " + str(self.errors) + " === ")
        if self.warnings:
            Summary += as_highlighted( 'warning', " === Warnings " + str(self.warnings) + " === ")
        return Summary
exchange.py
#
# Copyright 2017-2018 Government of Canada
# Public Services and Procurement Canada - buyandsell.gc.ca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
Implementation of the shared Exchange message bus and related classes
for sending and acting upon messages
"""

import asyncio
from collections import deque
from concurrent.futures import Future, ThreadPoolExecutor
import logging
import multiprocessing as mp
import os
from queue import Queue
from threading import get_ident, Event, Thread
import time
import traceback
from typing import Awaitable, Callable, NamedTuple, Sequence

import aiohttp

from . import eventloop

LOGGER = logging.getLogger(__name__)

# Cache of parsed _fields specs, keyed by ExchangeMessage subclass name.
_MESSAGE_FIELDS = {}


def format_type_name(ctype):
    """
    Convert a type or list of types to a string
    """
    if isinstance(ctype, Sequence):
        return '[{}]'.format(', '.join(map(format_type_name, ctype)))
    elif ctype is None:
        return 'None'
    return ctype.__name__


class ExchangeMessage:
    """
    A common base class for exchange messages
    """
    __slots__ = ('_values',)
    # Subclasses declare fields as names or (name, type[, default]) tuples.
    _fields = ()

    def __init__(self, *args, **kwargs):
        """Populate _values from positional/keyword args, applying defaults
        and type checks declared in _fields."""
        names, types, defaults, _positions = self._field_specs
        vals = []
        idx = 0
        if len(args) + len(kwargs) > len(names):
            raise TypeError("Too many arguments to constructor")
        for idx, name in enumerate(names):
            ftype = types.get(name)
            if idx < len(args):
                val = args[idx]
            else:
                if name in kwargs:
                    val = kwargs[name]
                elif name in defaults:
                    val = defaults[name]
                else:
                    raise TypeError("Property not provided to constructor: {}".format(name))
            if val is not None and ftype is not None and not isinstance(val, ftype):
                raise TypeError("Incorrect type for property '{}' ({}), expected {}".format(
                    name, format_type_name(type(val)), format_type_name(ftype)))
            vals.append(val)
        self._values = tuple(vals)

    @property
    def _field_specs(self):
        # Parse (and cache per-class) the _fields declaration into
        # (names, types, defaults, positions).
        cname = self.__class__.__name__
        if cname not in _MESSAGE_FIELDS:
            names = []
            defaults = {}
            positions = {}
            types = {}
            for idx, field in enumerate(self._fields):
                if isinstance(field, tuple):
                    name = field[0]
                    if len(field) > 1:
                        types[name] = field[1]
                    if len(field) > 2:
                        defaults[name] = field[2]
                else:
                    name = field
                names.append(name)
                positions[name] = idx
            _MESSAGE_FIELDS[cname] = (names, types, defaults, positions)
        return _MESSAGE_FIELDS[cname]

    @property
    def _field_names(self):
        return self._field_specs[0]

    @property
    def _field_types(self):
        return self._field_specs[1]

    @property
    def _field_defaults(self):
        return self._field_specs[2]

    @property
    def _field_positions(self):
        return self._field_specs[3]

    def __iter__(self):
        # Iterate as (name, value) pairs, namedtuple-style.
        return ((fname, self[idx]) for (idx, fname) in enumerate(self._field_names))

    def __getattr__(self, name):
        if name in self._field_names:
            return self._values[self._field_positions[name]]
        raise AttributeError("Unknown attribute: {}".format(name))

    def __getitem__(self, key):
        # Supports indexing/slicing by position as well as lookup by name.
        if isinstance(key, (slice, int)):
            return self._values[key]
        return getattr(self, key)

    def get(self, name: str, defval=None):
        """
        Get a property of the message by name

        Args:
            name: the property name
            defval: the default value to return if the property is not defined
        """
        return getattr(self, name, defval)

    def __repr__(self):
        cls = self.__class__.__name__
        params = ['{}={}'.format(fname, self[idx])
                  for (idx, fname) in enumerate(self._field_names)]
        return '{}({})'.format(cls, ', '.join(params))


class ExchangeFail(ExchangeMessage):
    """
    An error class to represent an exception in message processing

    This is not a subclass of :class:`Exception` as that cannot be pickled
    and transported over the message bus
    """
    _fields = ('value', 'exc_info')

    def __init__(self, value, exc_info=True):
        if exc_info is True:
            # cannot pass real exception or traceback through the message pipe
            exc_info = traceback.format_exc()
        super(ExchangeFail, self).__init__(value, exc_info)

    def format(self) -> str:
        """
        Format this :class:`ExchangeFail` instance as a string including the
        traceback, if any
        """
        ret = '{}'.format(self.value)
        if self.exc_info:
            ret += "\n" + str(self.exc_info)
        return ret

    def __repr__(self):
        cls = self.__class__.__name__
        ret = '{}(value={})'.format(cls, self.value)
        if self.exc_info:
            ret += "\n" + str(self.exc_info)
        return ret


class StopMessage(ExchangeMessage):
    """
    Basic stop-processing message for :class:`MessageProcessor` instances
    """
    pass


MessageWrapper = NamedTuple('MessageWrapper', [
    ('from_pid', str),
    ('ident', str),
    ('message', ExchangeMessage),
    ('ref', str)])
# ref is optional
MessageWrapper.__new__.__defaults__ = (None,)
MessageWrapper.__doc__ = """
    A wrapper for a message being passed through the :class:`Exchange` message bus

    Attributes:
        from_pid (str): The identifier of the sending service
        ident (str): A unique identifier for the message, used to tag responses
        message (ExchangeMessage): The message received
        ref (str): An optional identifier for the message being responded to
    """

QueuedMessage = NamedTuple('QueuedMessage', [
    ('to_pid', str),
    ('message', ExchangeMessage)])
QueuedMessage.__doc__ = """
    A wrapper for a message queued to be sent to the exchange

    Attributes:
        to_pid (str): The identifier of the recipient service
        message (ExchangeMessage): The message to be sent
    """


class Exchange:
    """
    A central message exchange hub for receiving requests and passing them to
    processors which may live in a different thread or process, but have a known
    identifier. Multiple processors may also respond to the same identifier in
    order to share processing. Responses are optional and can be tied to the
    original request.
    """

    def __init__(self):
        # _cmd_pipe[0] is used by the _run loop, _cmd_pipe[1] by clients
        # (serialized via _cmd_lock); _req_cond wakes blocked recv() callers.
        self._cmd_pipe = mp.Pipe()
        self._cmd_lock = mp.Lock()
        self._proc = None
        self._req_cond = mp.Condition(mp.Lock())

    def start(self, process: bool = True) -> None:
        """
        Start the message exchange as a thread or process
        """
        if process:
            evt = mp.Event()
            proc = mp.Process(target=self._run, args=(evt,))
        else:
            evt = Event()
            proc = Thread(target=self._run, args=(evt,))
        proc.daemon = True
        proc.start()
        # wait until the exchange loop signals it is ready for commands
        evt.wait()
        self._proc = proc
        LOGGER.info('Started exchange')

    def stop(self, drain: bool = True) -> None:
        """
        Send a stop signal to the polling thread
        """
        LOGGER.info('Stopping exchange')
        with self._req_cond:
            self._cmd('stop', drain)
            # wake all threads waiting for an incoming message
            self._req_cond.notify_all()

    def join(self) -> None:
        """
        Wait for the exchange to finish running
        """
        self._proc.join()

    def status(self) -> dict:
        """
        Retrieve the status from the polling thread

        Returns:
            A dict in the form {'pending': int, 'processed': int, 'total': int}
            representing the total numbers of messages handled by the exchange
        """
        with self._req_cond:
            return self._cmd('status')

    def _cmd(self, *command):
        """
        Execute a command against the exchange, using a process lock to
        synchronize requests and responses.
        Supported commands are currently `send`, `recv`, `status` and `stop`
        """
        with self._cmd_lock:
            self._cmd_pipe[1].send(command)
            return self._cmd_pipe[1].recv()

    def register(self, to_pid: str) -> bool:
        """
        Register a listener on the exchange
        """
        return self._cmd('register', to_pid)

    def is_registered(self, to_pid: str) -> bool:
        """
        Check if a listener is currently running
        """
        return self._cmd('check', to_pid)

    def send(self, to_pid: str, wrapper: MessageWrapper) -> bool:
        """
        Add a message to the bus, blocking until the processing thread is ready

        Args:
            to_pid: The identifier for the receiving service
            wrapper: The message to be added to the queue

        Returns:
            True if the message is successfully added to the queue
        """
        # Blocks until we have access to the message queues and command pipe
        # FIXME add a maximum buffer size for the message queues and allow blocking
        # until there is room in the buffer (optional blocking=True argument)
        with self._req_cond:
            LOGGER.debug('send to %s/%s %s', to_pid, wrapper.ref, wrapper.message)
            status = self._cmd('send', to_pid, wrapper)
            # wake all threads waiting for an incoming message
            self._req_cond.notify_all()
        return status

    def recv(self, to_pid: str, blocking: bool = True, timeout=None) -> MessageWrapper:
        """
        Receive a message from the bus

        Args:
            to_pid: The identifier of the recipient service
            blocking: Whether to sleep this thread until a message is received
            timeout: An optional timeout before aborting

        Returns:
            The next message in the queue, or None
        """
        #pylint: disable=broad-except
        try:
            LOGGER.debug('recv %s', to_pid)
            locked = self._req_cond.acquire(blocking)
            message = None
            if locked:
                message = self._cmd('recv', to_pid)
                # wait on the condition until a sender notifies us, then
                # retry the non-blocking pipe recv
                while message is None and (blocking or timeout is not None):
                    #LOGGER.warning("Locked (%r), wait", locked)
                    locked = self._req_cond.wait(timeout)
                    if locked:
                        message = self._cmd('recv', to_pid)
                    if not locked or timeout is not None:
                        break
                if locked:
                    self._req_cond.release()
        except Exception:
            LOGGER.exception('Error in recv:')
            raise
        return message

    def _drain(self) -> None:
        # Periodically ping the loop so it can terminate after 'stop'.
        while self._cmd('drain'):
            time.sleep(1)

    def _run(self, event: Event) -> None:
        """
        The message processing loop
        """
        drain = Thread(target=self._drain)
        drain.start()
        #pylint: disable=broad-except
        pending = 0
        processed = {}
        queue = {}
        stop_time = None
        event.set()
        try:
            while True:
                command = self._cmd_pipe[0].recv()

                if command[0] == 'register':
                    to_pid = command[1]
                    if to_pid and to_pid not in queue:
                        queue[to_pid] = deque()
                        self._cmd_pipe[0].send(True)
                        LOGGER.debug("registered %s", to_pid)
                    else:
                        self._cmd_pipe[0].send(False)

                elif command[0] == 'check':
                    to_pid = command[1]
                    self._cmd_pipe[0].send(to_pid and to_pid in queue)

                elif command[0] == 'send':
                    if stop_time:
                        # shutting down: refuse new messages
                        LOGGER.debug("rejected message %s %s", command[1], command[2])
                        self._cmd_pipe[0].send(False)
                    else:
                        to_pid = command[1]
                        if to_pid in queue:
                            queue[to_pid].append(command[2])
                            pending += 1
                        else:
                            # NOTE(review): an unregistered recipient replies
                            # here AND falls through to the reply below,
                            # sending twice on the command pipe -- looks like
                            # a protocol desync; confirm against upstream.
                            self._cmd_pipe[0].send(True)
                        self._cmd_pipe[0].send(True)

                elif command[0] == 'recv':
                    to_pid = command[1]
                    wrapper = None
                    if to_pid in queue:
                        try:
                            wrapper = queue[to_pid].popleft()
                            processed[to_pid] = processed.get(to_pid, 0) + 1
                            pending -= 1
                        except IndexError:
                            # queue registered but currently empty
                            pass
                        if wrapper and isinstance(wrapper.message, StopMessage):
                            # recipient is stopping: discard its backlog
                            pending -= len(queue[to_pid])
                            del queue[to_pid]
                            LOGGER.debug("unregistered %s", to_pid)
                    self._cmd_pipe[0].send(wrapper)

                elif command[0] == 'status':
                    total = sum(processed.values())
                    self._cmd_pipe[0].send({
                        'pending': pending,
                        'processed': processed,
                        'total': total})

                elif command[0] == 'drain':
                    # clean up expired messages ...
                    if stop_time:
                        # after stop: exit once drained or after a 5s grace
                        if not pending or time.time() - stop_time >= 5:
                            if pending:
                                LOGGER.debug("terminating with %s messages pending", pending)
                            self._cmd_pipe[0].send(False)
                            break
                    self._cmd_pipe[0].send(True)

                elif command[0] == 'stop':
                    # queue a StopMessage for every registered listener
                    for to_pid in queue:
                        LOGGER.debug("ordering %s to stop", to_pid)
                        queue[to_pid].append(MessageWrapper(None, None, StopMessage()))
                        pending += 1
                    stop_time = time.time()
                    self._cmd_pipe[0].send(True)

                else:
                    raise ValueError('Unrecognized command: {}'.format(command[0]))
        except Exception:
            LOGGER.exception('Error in exchange:')


class MessageTarget:
    """
    A wrapper for sending messages to a single target.

    Example:
        >>> target = MessageTarget(target_pid, exchange, my_pid)
        >>> target.send_noreply('hello')
        True
    """

    def __init__(self, pid: str, exchange: Exchange, from_pid: str = None):
        self._pid = pid
        self._from_pid = from_pid
        self._exchange = exchange

    @property
    def pid(self) -> str:
        """
        Accessor for the identifier of the recipient service
        """
        return self._pid

    @property
    def exchange(self) -> Exchange:
        """
        Accessor for the :class:`Exchange` used by this target
        """
        return self._exchange

    @property
    def from_pid(self) -> str:
        """
        Accessor for the identifier of the sending service
        """
        return self._from_pid

    def _send_message(self, message: MessageWrapper) -> bool:
        """
        Perform the actual addition to the message queue

        Args:
            message: the message to be sent
        """
        return self._exchange.send(self._pid, message)

    def send(
            self,
            ident: str,
            message: ExchangeMessage,
            ref: str = None,
            from_pid: str = None) -> bool:
        """
        Send a message to the recipient service

        Args:
            ident: The identifier used by the message response
            message: The message being sent
            ref: An optional identifier for the message being responded to
            from_pid: An optional override for the sender identifier

        Returns:
            True if the message was successfully added to the queue
        """
        return self._send_message(MessageWrapper(
            from_pid if from_pid is not None else self._from_pid,
            ident, message, ref))

    def send_noreply(
            self,
            message: ExchangeMessage,
            ref: str = None,
            from_pid: str = None) -> bool:
        """
        Send a message with no reply expected

        Returns:
            True if the message was successfully added to the queue
        """
        return self.send(None, message, ref, from_pid)


class MessageProcessor:
    """
    A generic message processor which polls the exchange for messages sent to
    this endpoint and runs the abstract 'process' method to perform actions
    and send responses.
    """

    def __init__(self, pid: str, exchange: Exchange):
        self._pid = pid
        self._exchange = exchange
        self._poll_thread = None

    @property
    def pid(self) -> str:
        """
        Accessor for the identifier of this request processor service
        """
        return self._pid

    @property
    def exchange(self) -> Exchange:
        """
        Accessor for the :class:`Exchange` used by this request processor
        """
        return self._exchange

    def get_message_target(self, pid: str) -> MessageTarget:
        """
        Quickly create a :class:`MessageTarget` for a service on the same
        message bus
        """
        return MessageTarget(pid, self._exchange, self._pid)

    def start(self, _wait: bool = True) -> None:
        """
        Run a thread to poll for received messages
        """
        self._poll_thread = Thread(target=self._run)
        self._poll_thread.start()

    def _start_run(self) -> bool:
        """
        Perform any additional initialization in the polling thread
        """
        return self._exchange.register(self._pid)

    def join(self) -> None:
        """
        Await our polling thread.
`stop()` must be called in order to cause it to abort """ if self._poll_thread: self._poll_thread.join() def send_stop_message(self) -> bool: """ Send the service a stop signal to end processing """ return self.send_noreply(self._pid, StopMessage()) def stop(self, wait: bool = True) -> None: """ Send a stop signal to the polling thread in order to abort polling """ if self.send_stop_message(): while wait and self._exchange.is_registered(self._pid): time.sleep(0.01) def _stop_run(self) -> None: """ Perform any additional shutdown actions in polling thread """ pass def _run(self) -> None: """ The main thread run loop """ if not self._start_run(): return self._poll_messages() self._stop_run() def _poll_messages(self) -> None: """ The polling loop for receiving messages from the exchange """ #pylint: disable=broad-except try: while self._poll_message(): pass except Exception: LOGGER.exception('Exception while processing messages:') def _poll_message(self) -> bool: """ Wait for a message from the exchange """ #pylint: disable=broad-except # blocks until a message is available received = self._exchange.recv(self._pid) LOGGER.debug('%s processing message: %s', self._pid, received.message) if isinstance(received.message, StopMessage): return False try: if self._process_message(received) is False: return False except Exception: LOGGER.exception('Exception during message processing:') errmsg = ExchangeFail('Exception during message processing', True) self._reply_with_error(received, errmsg) return True def _reply_with_error( self, from_message: MessageWrapper, errmsg: ExchangeFail) -> bool: """ Send an error message back to the sender of a previous message Args: from_message: the message which triggered the error errmsg: the error message to be sent """ if isinstance(from_message.message, ExchangeFail): LOGGER.error(from_message.message.format()) return False return self.send_noreply(from_message.from_pid, errmsg, from_message.ident) def _send_message(self, to_pid: str, 
wrapper: MessageWrapper) -> bool: """ Perform the actual addition to the exchange message queue Args: to_pid: the identifier of the recipient message: the message to be sent """ return self._exchange.send(to_pid, wrapper) def send( self, to_pid: str, ident: str, message: ExchangeMessage, ref: str = None, from_pid: str = None) -> bool: """ Send a message to a recipient on the exchange Args: to_pid: The identifier of the recipient ident: The identifier of thie message, to be used by responses message: The content of the message ref: The identifier of the message being responded to from_pid: An optional override for the sender identifier Returns: True if the message was successfully added to the queue """ return self._send_message( to_pid, MessageWrapper(from_pid or self._pid, ident, message, ref)) def send_noreply( self, to_pid: str, message: ExchangeMessage, ref: str = None, from_pid: str = None) -> bool: """ Send a message with no reply expected Returns: True if the message was successfully added to the queue """ return self._send_message( to_pid, MessageWrapper(from_pid or self._pid, None, message, ref)) def _process_message(self, received: MessageWrapper) -> bool: """ Process a message from another service and optionally send a message in response Returns: `False` if the polling thread should terminate """ pass class LoggingTCPConnector(aiohttp.TCPConnector): def _release(self, key, protocol, *, should_close=False): close = should_close or self._force_close or protocol.should_close LOGGER.debug("Connection released: %s", close and "Closing" or "Leaving open") super(LoggingTCPConnector, self)._release(key, protocol, should_close=should_close) class RequestExecutor(MessageProcessor): """ An subclass of :class:`MessageProcessor` which starts a thread for each outgoing request to wait for responses. One of these should live in each process which wants to perform async requests via the :class:`Exchange` (like a webserver process). 
It normally assumes that all incoming messages are simply responses to earlier requests. Processing should not block the main thread (much) to avoid breaking asyncio. """ def __init__(self, pid: str, exchange: Exchange): super(RequestExecutor, self).__init__(pid, exchange) self._connector = None self._out_queue = None self._req_lock = None self._requests = {} self._runner = None def start(self, wait: bool = True) -> None: """ Initialize our :class:`eventloop.Runner` and run our polling thread to listen for messages """ self._out_queue = Queue() self._runner = eventloop.Runner() self._runner.start(wait) self._req_lock = asyncio.Lock(loop=self._runner.loop) # Poll for results in a thread from our thread pool self.run_thread(self._run, ident='polling thread {}'.format(self.pid)) def _start_run(self) -> bool: if not super(RequestExecutor, self)._start_run(): return False # Send outgoing messages to the exchange (without blocking our event loop) self.run_thread(self._send_messages, ident='sending thread {}'.format(self.pid)) return True # In the webserver environment, the process we're concerned with has already started # so just use start() instead def start_process(self) -> mp.Process: """ Start this executor in a new process """ def _start(): self._init_process() self.start() self._runner.join() asyncio.get_child_watcher() proc = mp.Process(target=_start) proc.start() return proc def runner(self) -> eventloop.Runner: """ Accessor for the event loop runner instance used to execute tasks """ return self._runner def _stop_run(self) -> None: """ Stop our sending thread and any other tasks in progress """ # stop sending messages self._out_queue.put_nowait(None) self._out_queue.join() # close TCP connector if self._connector: self._connector.close() # shut down event loop self._runner.stop() def run_task(self, proc: Awaitable) -> asyncio.Future: """ Add a coroutine task to be performed by the runner Args: proc: the coroutine to be executed in the runner's event loop """ 
return self._runner.run_task(proc) def run_thread(self, proc: Callable, *args, ident: str = None) -> asyncio.Future: """ Add a task to be processed, as either a coroutine or function Args: proc: the function to be run in the :class:`ThreadPoolExecutor` args: arguments to pass to the proc, if a function """ if ident and False: _proc = proc def proc(*args): tid = get_ident() LOGGER.info(">> start thread %s %s", ident, tid) ret = _proc(*args) LOGGER.info("<< end thread %s %s", ident, tid) return ret return self._runner.run_in_executor(None, proc, *args) def _init_process(self) -> None: """ Initialize ourselves in a newly started process """ # create new event loop after fork asyncio.get_event_loop().close() loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) def _send_messages(self) -> None: """ Thread loop for sending messages added to the out-queue """ while True: msg = self._out_queue.get() if msg is None: self._out_queue.task_done() break self._exchange.send(msg.to_pid, msg.message) self._out_queue.task_done() def _send_message(self, to_pid: str, wrapper: MessageWrapper) -> bool: """ Add the message to our out-queue for processing instead of sending directly Args: to_pid: the identifier of the recipient message: the message to be sent """ self._out_queue.put_nowait(QueuedMessage(to_pid, wrapper)) return True async def _send_request(self, to_pid: str, request: ExchangeMessage, future: Future, timeout: int = None) -> None: """ Send a request to a target service on the exchange and add it to our collection to automatically associate the response later Args: to_pid: the target service identifier request: the message payload future: used to return the response to (potentially) another thread timeout: an optional timeout before cancelling the request """ message = MessageWrapper(self._pid, os.urandom(10), request) result = None async with self._req_lock: if message.ident in self._requests: future.set_exception(RuntimeError('Duplicate request identifier')) 
return self._requests[message.ident] = future result = self._send_message(to_pid, message) if not result: future.set_exception(RuntimeError('Request could not be processed')) elif timeout: self.run_task(self._cancel_request(message.ident, timeout)) async def _cancel_request(self, ident: str, timeout: int = None) -> None: """ Cancel an outstanding request Args: ident: the request identifier timeout: an optional timeout to wait before cancelling """ if timeout: await asyncio.sleep(timeout) async with self._req_lock: if ident in self._requests and not self._requests[ident].done(): self._requests[ident].cancel() def submit( self, to_pid: str, request: ExchangeMessage, timeout: int = None) -> asyncio.Future: """ Submit a message to another service and run a task to poll for the results Args: to_pid: the identifier of the target service request: the body of the message to be sent timeout: an optional timeout to wait before cancelling the request """ result = Future() self.run_task(self._send_request(to_pid, request, result, timeout)) return asyncio.wrap_future(result) async def _handle_message(self, received: MessageWrapper) -> bool: """ Handle a message received from another service on the exchange by awaking any tasks waiting for results Args: received: the received message to be processed """ result = False if received.ref: async with self._req_lock: if received.ref in self._requests: if not self._requests[received.ref].cancelled(): self._requests[received.ref].set_result(received.message) result = True self._requests = { ident: req for ident, req in self._requests.items() if not req.done()} return result async def _handle_message_task(self, received: MessageWrapper) -> None: """ Handle message processing within our own event loop Args: received: the message received from the exchange """ #pylint: disable=broad-except try: if not await self._handle_message(received): LOGGER.debug('unhandled message to %s/%s from %s: %s', self._pid, received.ref, received.from_pid, 
received.message) except Exception: errmsg = ExchangeFail('Exception during message processing', True) self._reply_with_error(received, errmsg) def _process_message(self, received: MessageWrapper) -> bool: """ Handle a message received from another service on the exchange Args: received: the received message to be processed """ # push the handling of the message into our own event loop self.run_task(self._handle_message_task(received)) return True @property def tcp_connector(self) -> aiohttp.TCPConnector: """ Return a connection pool associated with this event loop, which allows HTTP connection reuse """ if not self._connector: force_close = os.getenv('HTTP_FORCE_CLOSE_CONNECTIONS') force_close = bool(force_close) and force_close != 'false' self._connector = LoggingTCPConnector(force_close=force_close) return self._connector def http_client(self, *args, **kwargs) -> aiohttp.ClientSession: """ Construct an HTTP client using the shared connection pool """ no_reuse = os.getenv('HTTP_NO_CONNECTOR_REUSE') no_reuse = bool(no_reuse) and no_reuse != 'false' if 'connector' not in kwargs and not no_reuse: kwargs['connector'] = self.tcp_connector kwargs['connector_owner'] = False keep_cookies = os.getenv('HTTP_PRESERVE_COOKIES') keep_cookies = bool(keep_cookies) and keep_cookies != 'false' if 'cookie_jar' not in kwargs and not keep_cookies: kwargs['cookie_jar'] = aiohttp.DummyCookieJar() return aiohttp.ClientSession(*args, **kwargs) @property def http(self): """ A quick accessor for a default HTTP client instance """ return self.http_client() def get_request_target(self, pid: str) -> 'RequestTarget': """ Create a :class:`RequestTarget` for a specific service Args: pid: the identifer of the target service """ return RequestTarget(self, pid) class RequestTarget: """ An endpoint for a :class:`RequestExecutor` which uses submit() to poll for responses to requests. 
It must be created within the same process as the executor instance Example: >>> target = RequestTarget(executor, target_pid) >>> target.request('hello') Future<...> """ def __init__(self, executor: RequestExecutor, pid: str): self._executor = executor self._pid = pid @property def pid(self): """ Accessor for the target service identifier """ return self._pid @property def executor(self): """ Accessor for the :class:`RequestExecutor` instance """ return self._executor def request(self, message: ExchangeMessage, timeout: int = None) -> asyncio.Future: """ Send a request to the recipient service, awaiting the response in a method defined by the executor Args: message: The message to be sent timeout: An optional timeout for the message response """ return self._executor.submit( self.pid, message, timeout) class HelloProcessor(MessageProcessor): """ A simple request processor for testing response functionality or stress testing """ def _process_message(self, received: MessageWrapper) -> bool: self.send_noreply(received.from_pid, 'hello from {} {}'.format(os.getpid(), get_ident()), received.ident) class ThreadedHelloProcessor(HelloProcessor): """ A threaded request processor for testing delayed, blocking and non-blocking responses """ def __init__(self, pid, exchange, blocking=False, max_workers=5): super(ThreadedHelloProcessor, self).__init__(pid, exchange) self._blocking = blocking self._pool = None self._max_workers = max_workers def start(self, _wait: bool = True) -> None: self._pool = ThreadPoolExecutor(self._max_workers) #thread_name_prefix=self._pid self._pool.submit(self._run) def start_process(self) -> mp.Process: """ Start this demo processor as a process instead of a thread """ proc = mp.Process(target=lambda: self.start().result()) proc.start() return proc def _process_message(self, received: MessageWrapper) -> bool: if self._blocking: self._delayed_process(received) else: self._pool.submit(self._delayed_process, received) def _delayed_process(self, 
received: MessageWrapper) -> bool: time.sleep(1) return super(ThreadedHelloProcessor, self)._process_message(received) # Testing two workers dividing requests: # hello = ThreadedHelloProcessor('hello', exchange, blocking=True) # hello.start_process() # hello.start_process() # .. exchange.send('hello', None, None, 'poke') ..
test_decimal.py
# Copyright (c) 2004 Python Software Foundation. # All rights reserved. # Written by Eric Price <eprice at tjhsst.edu> # and Facundo Batista <facundo at taniquetil.com.ar> # and Raymond Hettinger <python at rcn.com> # and Aahz (aahz at pobox.com) # and Tim Peters """ These are the test cases for the Decimal module. There are two groups of tests, Arithmetic and Behaviour. The former test the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter test the pythonic behaviour according to PEP 327. Cowlishaw's tests can be downloaded from: http://speleotrove.com/decimal/dectest.zip This test module can be called from command line with one parameter (Arithmetic or Behaviour) to test each part, or without parameter to test both parts. If you're working through IDLE, you can import this test module and call test_main() with the corresponding argument. """ import math import os, sys import operator import warnings import pickle, copy import unittest import numbers import locale from test.support import (run_unittest, run_doctest, is_resource_enabled, requires_IEEE_754, requires_docstrings) from test.support import (check_warnings, import_fresh_module, TestFailed, run_with_locale, cpython_only) import random import time import warnings import inspect try: import threading except ImportError: threading = None C = import_fresh_module('decimal', fresh=['_decimal']) P = import_fresh_module('decimal', blocked=['_decimal']) orig_sys_decimal = sys.modules['decimal'] # fractions module must import the correct decimal module. 
cfractions = import_fresh_module('fractions', fresh=['fractions'])
# `fractions` imports `decimal` at module load, so temporarily swap the entry
# in sys.modules to bind one fractions module to each implementation, then
# restore the original decimal module.
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal

# Useful Test Constant
# All signal (condition) classes of each implementation's default context;
# None for C when the _decimal extension is unavailable.
Signals = {
  C: tuple(C.getcontext().flags.keys()) if C else None,
  P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
  C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
      C.Overflow, C.DivisionByZero, C.InvalidOperation,
      C.FloatOperation] if C else None,
  P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
      P.Overflow, P.DivisionByZero, P.InvalidOperation,
      P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    # Assert that exactly the signals in `expected` are set in the given
    # context attribute (`flags` or `traps`) and that all others are clear.
    d = getattr(context, attr)
    cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))

# Rounding-mode constants, taken from the pure-Python module (the same
# strings are used by both implementations).
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP

RoundingModes = [
  ROUND_UP, ROUND_DOWN,
  ROUND_CEILING, ROUND_FLOOR,
  ROUND_HALF_UP, ROUND_HALF_DOWN,
  ROUND_HALF_EVEN, ROUND_05UP
]

# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = { C: C.getcontext().copy() if C else None, P: P.getcontext().copy() } def init(m): if not m: return DefaultTestContext = m.Context( prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0) ) m.setcontext(DefaultTestContext) TESTDATADIR = 'decimaltestdata' if __name__ == '__main__': file = sys.argv[0] else: file = __file__ testdir = os.path.dirname(file) or os.curdir directory = testdir + os.sep + TESTDATADIR + os.sep skip_expected = not os.path.isdir(directory) # Make sure it actually raises errors when not expected and caught in flags # Slower, since it runs some things several times. EXTENDEDERRORTEST = False # Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY). EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False requires_extra_functionality = unittest.skipUnless( EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY") skip_if_extra_functionality = unittest.skipIf( EXTRA_FUNCTIONALITY, "test requires regular build") class IBMTestCases(unittest.TestCase): """Class which tests the Decimal class against the IBM test cases.""" def setUp(self): self.context = self.decimal.Context() self.readcontext = self.decimal.Context() self.ignore_list = ['#'] # List of individual .decTest test ids that correspond to tests that # we're skipping for one reason or another. self.skipped_test_ids = set([ # Skip implementation-specific scaleb tests. 'scbx164', 'scbx165', # For some operations (currently exp, ln, log10, power), the decNumber # reference implementation imposes additional restrictions on the context # and operands. These restrictions are not part of the specification; # however, the effect of these restrictions does show up in some of the # testcases. We skip testcases that violate these restrictions, since # Decimal behaves differently from decNumber for these testcases so these # testcases would otherwise fail. 
'expx901', 'expx902', 'expx903', 'expx905', 'lnx901', 'lnx902', 'lnx903', 'lnx905', 'logx901', 'logx902', 'logx903', 'logx905', 'powx1183', 'powx1184', 'powx4001', 'powx4002', 'powx4003', 'powx4005', 'powx4008', 'powx4010', 'powx4012', 'powx4014', ]) if self.decimal == C: # status has additional Subnormal, Underflow self.skipped_test_ids.add('pwsx803') self.skipped_test_ids.add('pwsx805') # Correct rounding (skipped for decNumber, too) self.skipped_test_ids.add('powx4302') self.skipped_test_ids.add('powx4303') self.skipped_test_ids.add('powx4342') self.skipped_test_ids.add('powx4343') # http://bugs.python.org/issue7049 self.skipped_test_ids.add('pwmx325') self.skipped_test_ids.add('pwmx326') # Map test directives to setter functions. self.ChangeDict = {'precision' : self.change_precision, 'rounding' : self.change_rounding_method, 'maxexponent' : self.change_max_exponent, 'minexponent' : self.change_min_exponent, 'clamp' : self.change_clamp} # Name adapter to be able to change the Decimal and Context # interface without changing the test files from Cowlishaw. 
self.NameAdapter = {'and':'logical_and', 'apply':'_apply', 'class':'number_class', 'comparesig':'compare_signal', 'comparetotal':'compare_total', 'comparetotmag':'compare_total_mag', 'copy':'copy_decimal', 'copyabs':'copy_abs', 'copynegate':'copy_negate', 'copysign':'copy_sign', 'divideint':'divide_int', 'invert':'logical_invert', 'iscanonical':'is_canonical', 'isfinite':'is_finite', 'isinfinite':'is_infinite', 'isnan':'is_nan', 'isnormal':'is_normal', 'isqnan':'is_qnan', 'issigned':'is_signed', 'issnan':'is_snan', 'issubnormal':'is_subnormal', 'iszero':'is_zero', 'maxmag':'max_mag', 'minmag':'min_mag', 'nextminus':'next_minus', 'nextplus':'next_plus', 'nexttoward':'next_toward', 'or':'logical_or', 'reduce':'normalize', 'remaindernear':'remainder_near', 'samequantum':'same_quantum', 'squareroot':'sqrt', 'toeng':'to_eng_string', 'tointegral':'to_integral_value', 'tointegralx':'to_integral_exact', 'tosci':'to_sci_string', 'xor':'logical_xor'} # Map test-case names to roundings. self.RoundingDict = {'ceiling' : ROUND_CEILING, 'down' : ROUND_DOWN, 'floor' : ROUND_FLOOR, 'half_down' : ROUND_HALF_DOWN, 'half_even' : ROUND_HALF_EVEN, 'half_up' : ROUND_HALF_UP, 'up' : ROUND_UP, '05up' : ROUND_05UP} # Map the test cases' error names to the actual errors. self.ErrorNames = {'clamped' : self.decimal.Clamped, 'conversion_syntax' : self.decimal.InvalidOperation, 'division_by_zero' : self.decimal.DivisionByZero, 'division_impossible' : self.decimal.InvalidOperation, 'division_undefined' : self.decimal.InvalidOperation, 'inexact' : self.decimal.Inexact, 'invalid_context' : self.decimal.InvalidOperation, 'invalid_operation' : self.decimal.InvalidOperation, 'overflow' : self.decimal.Overflow, 'rounded' : self.decimal.Rounded, 'subnormal' : self.decimal.Subnormal, 'underflow' : self.decimal.Underflow} # The following functions return True/False rather than a # Decimal instance. 
self.LogicalFunctions = ('is_canonical', 'is_finite', 'is_infinite', 'is_nan', 'is_normal', 'is_qnan', 'is_signed', 'is_snan', 'is_subnormal', 'is_zero', 'same_quantum') def read_unlimited(self, v, context): """Work around the limitations of the 32-bit _decimal version. The guaranteed maximum values for prec, Emax etc. are 425000000, but higher values usually work, except for rare corner cases. In particular, all of the IBM tests pass with maximum values of 1070000000.""" if self.decimal == C and self.decimal.MAX_EMAX == 425000000: self.readcontext._unsafe_setprec(1070000000) self.readcontext._unsafe_setemax(1070000000) self.readcontext._unsafe_setemin(-1070000000) return self.readcontext.create_decimal(v) else: return self.decimal.Decimal(v, context) def eval_file(self, file): global skip_expected if skip_expected: raise unittest.SkipTest with open(file) as f: for line in f: line = line.replace('\r\n', '').replace('\n', '') #print line try: t = self.eval_line(line) except self.decimal.DecimalException as exception: #Exception raised where there shouldn't have been one. 
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line) def eval_line(self, s): if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'): s = (s.split('->')[0] + '->' + s.split('->')[1].split('--')[0]).strip() else: s = s.split('--')[0].strip() for ignore in self.ignore_list: if s.find(ignore) >= 0: #print s.split()[0], 'NotImplemented--', ignore return if not s: return elif ':' in s: return self.eval_directive(s) else: return self.eval_equation(s) def eval_directive(self, s): funct, value = (x.strip().lower() for x in s.split(':')) if funct == 'rounding': value = self.RoundingDict[value] else: try: value = int(value) except ValueError: pass funct = self.ChangeDict.get(funct, (lambda *args: None)) funct(value) def eval_equation(self, s): if not TEST_ALL and random.random() < 0.90: return self.context.clear_flags() try: Sides = s.split('->') L = Sides[0].strip().split() id = L[0] if DEBUG: print("Test ", id, end=" ") funct = L[1].lower() valstemp = L[2:] L = Sides[1].strip().split() ans = L[0] exceptions = L[1:] except (TypeError, AttributeError, IndexError): raise self.decimal.InvalidOperation def FixQuotes(val): val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote') val = val.replace("'", '').replace('"', '') val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"') return val if id in self.skipped_test_ids: return fname = self.NameAdapter.get(funct, funct) if fname == 'rescale': return funct = getattr(self.context, fname) vals = [] conglomerate = '' quote = 0 theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions] for exception in Signals[self.decimal]: self.context.traps[exception] = 1 #Catch these bugs... 
for exception in theirexceptions: self.context.traps[exception] = 0 for i, val in enumerate(valstemp): if val.count("'") % 2 == 1: quote = 1 - quote if quote: conglomerate = conglomerate + ' ' + val continue else: val = conglomerate + val conglomerate = '' v = FixQuotes(val) if fname in ('to_sci_string', 'to_eng_string'): if EXTENDEDERRORTEST: for error in theirexceptions: self.context.traps[error] = 1 try: funct(self.context.create_decimal(v)) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 v = self.context.create_decimal(v) else: v = self.read_unlimited(v, self.context) vals.append(v) ans = FixQuotes(ans) if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'): for error in theirexceptions: self.context.traps[error] = 1 try: funct(*vals) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 # as above, but add traps cumulatively, to check precedence ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions] for error in ordered_errors: self.context.traps[error] = 1 try: funct(*vals) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s; expected %s" % (type(e), s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) # reset traps for error in ordered_errors: self.context.traps[error] = 0 if DEBUG: print("--", self.context) try: result = str(funct(*vals)) if fname in self.LogicalFunctions: result = str(int(eval(result))) # 'True', 'False' -> '1', '0' except Signals[self.decimal] as error: self.fail("Raised %s in %s" % (error, s)) except: #Catch any error long enough to state the test case. 
print("ERROR:", s) raise myexceptions = self.getexceptions() myexceptions.sort(key=repr) theirexceptions.sort(key=repr) self.assertEqual(result, ans, 'Incorrect answer for ' + s + ' -- got ' + result) self.assertEqual(myexceptions, theirexceptions, 'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions)) def getexceptions(self): return [e for e in Signals[self.decimal] if self.context.flags[e]] def change_precision(self, prec): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context._unsafe_setprec(prec) else: self.context.prec = prec def change_rounding_method(self, rounding): self.context.rounding = rounding def change_min_exponent(self, exp): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context._unsafe_setemin(exp) else: self.context.Emin = exp def change_max_exponent(self, exp): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context._unsafe_setemax(exp) else: self.context.Emax = exp def change_clamp(self, clamp): self.context.clamp = clamp class CIBMTestCases(IBMTestCases): decimal = C class PyIBMTestCases(IBMTestCases): decimal = P # The following classes test the behaviour of Decimal according to PEP 327 class ExplicitConstructionTest(unittest.TestCase): '''Unit tests for Explicit Construction cases of Decimal.''' def test_explicit_empty(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal(), Decimal("0")) def test_explicit_from_None(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, Decimal, None) def test_explicit_from_int(self): Decimal = self.decimal.Decimal #positive d = Decimal(45) self.assertEqual(str(d), '45') #very large positive d = Decimal(500000123) self.assertEqual(str(d), '500000123') #negative d = Decimal(-45) self.assertEqual(str(d), '-45') #zero d = Decimal(0) self.assertEqual(str(d), '0') # single word longs for n in range(0, 32): for sign in (-1, 1): for x in range(-5, 5): i = sign * (2**n + x) d = Decimal(i) self.assertEqual(str(d), str(i)) def 
test_explicit_from_string(self): Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation localcontext = self.decimal.localcontext #empty self.assertEqual(str(Decimal('')), 'NaN') #int self.assertEqual(str(Decimal('45')), '45') #float self.assertEqual(str(Decimal('45.34')), '45.34') #engineer notation self.assertEqual(str(Decimal('45e2')), '4.5E+3') #just not a number self.assertEqual(str(Decimal('ugly')), 'NaN') #leading and trailing whitespace permitted self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4') self.assertEqual(str(Decimal(' -7.89')), '-7.89') self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679') # unicode whitespace for lead in ["", ' ', '\u00a0', '\u205f']: for trail in ["", ' ', '\u00a0', '\u205f']: self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)), '9.311E+28') with localcontext() as c: c.traps[InvalidOperation] = True # Invalid string self.assertRaises(InvalidOperation, Decimal, "xyz") # Two arguments max self.assertRaises(TypeError, Decimal, "1234", "x", "y") # space within the numeric part self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03") self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0") # unicode whitespace self.assertRaises(InvalidOperation, Decimal, "\u00a0") self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0") # embedded NUL self.assertRaises(InvalidOperation, Decimal, "12\u00003") @cpython_only def test_from_legacy_strings(self): import _testcapi Decimal = self.decimal.Decimal context = self.decimal.Context() s = _testcapi.unicode_legacy_string('9.999999') self.assertEqual(str(Decimal(s)), '9.999999') self.assertEqual(str(context.create_decimal(s)), '9.999999') def test_explicit_from_tuples(self): Decimal = self.decimal.Decimal #zero d = Decimal( (0, (0,), 0) ) self.assertEqual(str(d), '0') #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(str(d), '-45') #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(str(d), '45.34') #weird d = Decimal( (1, (4, 3, 4, 9, 
1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') #inf d = Decimal( (0, (), "F") ) self.assertEqual(str(d), 'Infinity') #wrong number of items self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) ) #bad sign self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2)) #bad exp self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') ) #bad coefficients self.assertRaises(ValueError, Decimal, (1, "xyz", 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) ) def test_explicit_from_list(self): Decimal = self.decimal.Decimal d = Decimal([0, [0], 0]) self.assertEqual(str(d), '0') d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25]) self.assertEqual(str(d), '-4.34913534E-17') d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25]) self.assertEqual(str(d), '-4.34913534E-17') d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25)) self.assertEqual(str(d), '-4.34913534E-17') def test_explicit_from_bool(self): Decimal = self.decimal.Decimal self.assertIs(bool(Decimal(0)), False) self.assertIs(bool(Decimal(1)), True) self.assertEqual(Decimal(False), Decimal(0)) self.assertEqual(Decimal(True), Decimal(1)) def test_explicit_from_Decimal(self): Decimal = self.decimal.Decimal #positive d = Decimal(45) e = Decimal(d) self.assertEqual(str(e), '45') #very large positive d = Decimal(500000123) e = Decimal(d) self.assertEqual(str(e), '500000123') #negative d = Decimal(-45) e = Decimal(d) self.assertEqual(str(e), '-45') #zero d = Decimal(0) e = Decimal(d) 
self.assertEqual(str(e), '0') @requires_IEEE_754 def test_explicit_from_float(self): Decimal = self.decimal.Decimal r = Decimal(0.1) self.assertEqual(type(r), Decimal) self.assertEqual(str(r), '0.1000000000000000055511151231257827021181583404541015625') self.assertTrue(Decimal(float('nan')).is_qnan()) self.assertTrue(Decimal(float('inf')).is_infinite()) self.assertTrue(Decimal(float('-inf')).is_infinite()) self.assertEqual(str(Decimal(float('nan'))), str(Decimal('NaN'))) self.assertEqual(str(Decimal(float('inf'))), str(Decimal('Infinity'))) self.assertEqual(str(Decimal(float('-inf'))), str(Decimal('-Infinity'))) self.assertEqual(str(Decimal(float('-0.0'))), str(Decimal('-0'))) for i in range(200): x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(Decimal(x))) # roundtrip def test_explicit_context_create_decimal(self): Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation Rounded = self.decimal.Rounded nc = copy.copy(self.decimal.getcontext()) nc.prec = 3 # empty d = Decimal() self.assertEqual(str(d), '0') d = nc.create_decimal() self.assertEqual(str(d), '0') # from None self.assertRaises(TypeError, nc.create_decimal, None) # from int d = nc.create_decimal(456) self.assertIsInstance(d, Decimal) self.assertEqual(nc.create_decimal(45678), nc.create_decimal('457E+2')) # from string d = Decimal('456789') self.assertEqual(str(d), '456789') d = nc.create_decimal('456789') self.assertEqual(str(d), '4.57E+5') # leading and trailing whitespace should result in a NaN; # spaces are already checked in Cowlishaw's test-suite, so # here we just check that a trailing newline results in a NaN self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN') # from tuples d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.35E-17') # from Decimal prevdec = Decimal(500000123) d = 
Decimal(prevdec) self.assertEqual(str(d), '500000123') d = nc.create_decimal(prevdec) self.assertEqual(str(d), '5.00E+8') # more integers nc.prec = 28 nc.traps[InvalidOperation] = True for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0, 2**31-1, 2**31, 2**63-1, 2**63]: d = nc.create_decimal(v) self.assertTrue(isinstance(d, Decimal)) self.assertEqual(int(d), v) nc.prec = 3 nc.traps[Rounded] = True self.assertRaises(Rounded, nc.create_decimal, 1234) # from string nc.prec = 28 self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17') self.assertEqual(str(nc.create_decimal('45')), '45') self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity') self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123') # invalid arguments self.assertRaises(InvalidOperation, nc.create_decimal, "xyz") self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25)) self.assertRaises(TypeError, nc.create_decimal, "1234", "5678") # too many NaN payload digits nc.prec = 3 self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345') self.assertRaises(InvalidOperation, nc.create_decimal, Decimal('NaN12345')) nc.traps[InvalidOperation] = False self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN') self.assertTrue(nc.flags[InvalidOperation]) nc.flags[InvalidOperation] = False self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN') self.assertTrue(nc.flags[InvalidOperation]) def test_explicit_context_create_from_float(self): Decimal = self.decimal.Decimal nc = self.decimal.Context() r = nc.create_decimal(0.1) self.assertEqual(type(r), Decimal) self.assertEqual(str(r), '0.1000000000000000055511151231') self.assertTrue(nc.create_decimal(float('nan')).is_qnan()) self.assertTrue(nc.create_decimal(float('inf')).is_infinite()) self.assertTrue(nc.create_decimal(float('-inf')).is_infinite()) self.assertEqual(str(nc.create_decimal(float('nan'))), str(nc.create_decimal('NaN'))) self.assertEqual(str(nc.create_decimal(float('inf'))), str(nc.create_decimal('Infinity'))) 
self.assertEqual(str(nc.create_decimal(float('-inf'))), str(nc.create_decimal('-Infinity'))) self.assertEqual(str(nc.create_decimal(float('-0.0'))), str(nc.create_decimal('-0'))) nc.prec = 100 for i in range(200): x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip def test_unicode_digits(self): Decimal = self.decimal.Decimal test_values = { '\uff11': '1', '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372', '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400', } for input, expected in test_values.items(): self.assertEqual(str(Decimal(input)), expected) class CExplicitConstructionTest(ExplicitConstructionTest): decimal = C class PyExplicitConstructionTest(ExplicitConstructionTest): decimal = P class ImplicitConstructionTest(unittest.TestCase): '''Unit tests for Implicit Construction cases of Decimal.''' def test_implicit_from_None(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals()) def test_implicit_from_int(self): Decimal = self.decimal.Decimal #normal self.assertEqual(str(Decimal(5) + 45), '50') #exceeding precision self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000)) def test_implicit_from_string(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals()) def test_implicit_from_float(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals()) def test_implicit_from_Decimal(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal(5) + Decimal(45), Decimal(50)) def test_rop(self): Decimal = self.decimal.Decimal # Allow other classes to be trained to interact with Decimals class E: def __divmod__(self, other): return 'divmod ' + str(other) def __rdivmod__(self, other): return str(other) + ' rdivmod' def __lt__(self, other): return 'lt ' + str(other) def __gt__(self, other): return 'gt ' + str(other) def __le__(self, other): return 'le ' + 
str(other) def __ge__(self, other): return 'ge ' + str(other) def __eq__(self, other): return 'eq ' + str(other) def __ne__(self, other): return 'ne ' + str(other) self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10') self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod') self.assertEqual(eval('Decimal(10) < E()'), 'gt 10') self.assertEqual(eval('Decimal(10) > E()'), 'lt 10') self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10') self.assertEqual(eval('Decimal(10) >= E()'), 'le 10') self.assertEqual(eval('Decimal(10) == E()'), 'eq 10') self.assertEqual(eval('Decimal(10) != E()'), 'ne 10') # insert operator methods and then exercise them oplist = [ ('+', '__add__', '__radd__'), ('-', '__sub__', '__rsub__'), ('*', '__mul__', '__rmul__'), ('/', '__truediv__', '__rtruediv__'), ('%', '__mod__', '__rmod__'), ('//', '__floordiv__', '__rfloordiv__'), ('**', '__pow__', '__rpow__') ] for sym, lop, rop in oplist: setattr(E, lop, lambda self, other: 'str' + lop + str(other)) setattr(E, rop, lambda self, other: str(other) + rop + 'str') self.assertEqual(eval('E()' + sym + 'Decimal(10)'), 'str' + lop + '10') self.assertEqual(eval('Decimal(10)' + sym + 'E()'), '10' + rop + 'str') class CImplicitConstructionTest(ImplicitConstructionTest): decimal = C class PyImplicitConstructionTest(ImplicitConstructionTest): decimal = P class FormatTest(unittest.TestCase): '''Unit tests for the format function.''' def test_formatting(self): Decimal = self.decimal.Decimal # triples giving a format, a Decimal, and the expected result test_values = [ ('e', '0E-15', '0e-15'), ('e', '2.3E-15', '2.3e-15'), ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros ('e', '2.30000E-15', '2.30000e-15'), ('e', '1.23456789123456789e40', '1.23456789123456789e+40'), ('e', '1.5', '1.5e+0'), ('e', '0.15', '1.5e-1'), ('e', '0.015', '1.5e-2'), ('e', '0.0000000000015', '1.5e-12'), ('e', '15.0', '1.50e+1'), ('e', '-15', '-1.5e+1'), ('e', '0', '0e+0'), ('e', '0E1', '0e+1'), ('e', '0.0', '0e-1'), ('e', '0.00', 
'0e-2'), ('.6e', '0E-15', '0.000000e-9'), ('.6e', '0', '0.000000e+6'), ('.6e', '9.999999', '9.999999e+0'), ('.6e', '9.9999999', '1.000000e+1'), ('.6e', '-1.23e5', '-1.230000e+5'), ('.6e', '1.23456789e-3', '1.234568e-3'), ('f', '0', '0'), ('f', '0.0', '0.0'), ('f', '0E-2', '0.00'), ('f', '0.00E-8', '0.0000000000'), ('f', '0E1', '0'), # loses exponent information ('f', '3.2E1', '32'), ('f', '3.2E2', '320'), ('f', '3.20E2', '320'), ('f', '3.200E2', '320.0'), ('f', '3.2E-6', '0.0000032'), ('.6f', '0E-15', '0.000000'), # all zeros treated equally ('.6f', '0E1', '0.000000'), ('.6f', '0', '0.000000'), ('.0f', '0', '0'), # no decimal point ('.0f', '0e-2', '0'), ('.0f', '3.14159265', '3'), ('.1f', '3.14159265', '3.1'), ('.4f', '3.14159265', '3.1416'), ('.6f', '3.14159265', '3.141593'), ('.7f', '3.14159265', '3.1415926'), # round-half-even! ('.8f', '3.14159265', '3.14159265'), ('.9f', '3.14159265', '3.141592650'), ('g', '0', '0'), ('g', '0.0', '0.0'), ('g', '0E1', '0e+1'), ('G', '0E1', '0E+1'), ('g', '0E-5', '0.00000'), ('g', '0E-6', '0.000000'), ('g', '0E-7', '0e-7'), ('g', '-0E2', '-0e+2'), ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig ('.0n', '3.14159265', '3'), # same for 'n' ('.1g', '3.14159265', '3'), ('.2g', '3.14159265', '3.1'), ('.5g', '3.14159265', '3.1416'), ('.7g', '3.14159265', '3.141593'), ('.8g', '3.14159265', '3.1415926'), # round-half-even! 
('.9g', '3.14159265', '3.14159265'), ('.10g', '3.14159265', '3.14159265'), # don't pad ('%', '0E1', '0%'), ('%', '0E0', '0%'), ('%', '0E-1', '0%'), ('%', '0E-2', '0%'), ('%', '0E-3', '0.0%'), ('%', '0E-4', '0.00%'), ('.3%', '0', '0.000%'), # all zeros treated equally ('.3%', '0E10', '0.000%'), ('.3%', '0E-10', '0.000%'), ('.3%', '2.34', '234.000%'), ('.3%', '1.234567', '123.457%'), ('.0%', '1.23', '123%'), ('e', 'NaN', 'NaN'), ('f', '-NaN123', '-NaN123'), ('+g', 'NaN456', '+NaN456'), ('.3e', 'Inf', 'Infinity'), ('.16f', '-Inf', '-Infinity'), ('.0g', '-sNaN', '-sNaN'), ('', '1.00', '1.00'), # test alignment and padding ('6', '123', ' 123'), ('<6', '123', '123 '), ('>6', '123', ' 123'), ('^6', '123', ' 123 '), ('=+6', '123', '+ 123'), ('#<10', 'NaN', 'NaN#######'), ('#<10', '-4.3', '-4.3######'), ('#<+10', '0.0130', '+0.0130###'), ('#< 10', '0.0130', ' 0.0130###'), ('@>10', '-Inf', '@-Infinity'), ('#>5', '-Inf', '-Infinity'), ('?^5', '123', '?123?'), ('%^6', '123', '%123%%'), (' ^6', '-45.6', '-45.6 '), ('/=10', '-45.6', '-/////45.6'), ('/=+10', '45.6', '+/////45.6'), ('/= 10', '45.6', ' /////45.6'), ('\x00=10', '-inf', '-\x00Infinity'), ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'), ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'), ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'), # thousands separator (',', '1234567', '1,234,567'), (',', '123456', '123,456'), (',', '12345', '12,345'), (',', '1234', '1,234'), (',', '123', '123'), (',', '12', '12'), (',', '1', '1'), (',', '0', '0'), (',', '-1234567', '-1,234,567'), (',', '-123456', '-123,456'), ('7,', '123456', '123,456'), ('8,', '123456', ' 123,456'), ('08,', '123456', '0,123,456'), # special case: extra 0 needed ('+08,', '123456', '+123,456'), # but not if there's a sign (' 08,', '123456', ' 123,456'), ('08,', '-123456', '-123,456'), ('+09,', '123456', '+0,123,456'), # ... with fractional part... 
def test_n_format(self):
    """'n' formatting driven by explicit localeconv-style dictionaries
    (no real locale switching involved)."""
    Decimal = self.decimal.Decimal
    try:
        from locale import CHAR_MAX
    except ImportError:
        self.skipTest('locale.CHAR_MAX not available')

    def make_grouping(lst):
        # The C implementation takes the raw struct-lconv byte encoding;
        # the Python implementation takes the list directly.
        return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

    def get_fmt(x, override=None, fmt='n'):
        # The localeconv override is passed positionally for C and by
        # keyword for the Python implementation.
        if self.decimal == C:
            return Decimal(x).__format__(fmt, override)
        else:
            return Decimal(x).__format__(fmt, _localeconv=override)

    # Set up some localeconv-like dictionaries
    en_US = {
        'decimal_point' : '.',
        'grouping' : make_grouping([3, 3, 0]),
        'thousands_sep' : ','
    }

    fr_FR = {
        'decimal_point' : ',',
        'grouping' : make_grouping([CHAR_MAX]),
        'thousands_sep' : ''
    }

    ru_RU = {
        'decimal_point' : ',',
        'grouping' : make_grouping([3, 3, 0]),
        'thousands_sep' : ' '
    }

    crazy = {
        'decimal_point' : '&',
        'grouping' : make_grouping([1, 4, 2, CHAR_MAX]),
        'thousands_sep' : '-'
    }

    dotsep_wide = {
        'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
        'grouping' : make_grouping([3, 3, 0]),
        'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
    }

    # decimal-point translation
    for conv, want in ((en_US, '12.7'), (fr_FR, '12,7'),
                       (ru_RU, '12,7'), (crazy, '1-2&7')):
        self.assertEqual(get_fmt(Decimal('12.7'), conv), want)

    # thousands separator / grouping
    self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
    self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
    self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
    self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

    # precision combined with the locale decimal point
    self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
    self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
    self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
    self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

    # zero padding interacts with each locale's grouping
    for spec, want in (('03n', '1234'), ('04n', '1234'),
                       ('05n', '01234'), ('06n', '001234')):
        self.assertEqual(get_fmt(1234, fr_FR, spec), want)

    for spec, want in (('05n', '12,345'), ('06n', '12,345'),
                       ('07n', '012,345'), ('08n', '0,012,345'),
                       ('09n', '0,012,345'), ('010n', '00,012,345')):
        self.assertEqual(get_fmt(12345, en_US, spec), want)

    for spec, want in (('06n', '1-2345-6'), ('07n', '1-2345-6'),
                       ('08n', '1-2345-6'), ('09n', '01-2345-6'),
                       ('010n', '0-01-2345-6'), ('011n', '0-01-2345-6'),
                       ('012n', '00-01-2345-6'), ('013n', '000-01-2345-6')):
        self.assertEqual(get_fmt(123456, crazy, spec), want)

    # wide char separator and decimal point
    self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                     '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
thousands_sep = locale.localeconv()['thousands_sep'] if decimal_point != '\u066b': self.skipTest('inappropriate decimal point separator' '({!a} not {!a})'.format(decimal_point, '\u066b')) if thousands_sep != '\u066c': self.skipTest('inappropriate thousands separator' '({!a} not {!a})'.format(thousands_sep, '\u066c')) self.assertEqual(format(Decimal('100000000.123'), 'n'), '100\u066c000\u066c000\u066b123') class CFormatTest(FormatTest): decimal = C class PyFormatTest(FormatTest): decimal = P class ArithmeticOperatorsTest(unittest.TestCase): '''Unit tests for all arithmetic operators, binary and unary.''' def test_addition(self): Decimal = self.decimal.Decimal d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1+d2, Decimal('11.1')) self.assertEqual(d2+d1, Decimal('11.1')) #with other type, left c = d1 + 5 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 + d1 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 += d2 self.assertEqual(d1, Decimal('11.1')) #inline with other type d1 += 5 self.assertEqual(d1, Decimal('16.1')) def test_subtraction(self): Decimal = self.decimal.Decimal d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1-d2, Decimal('-33.3')) self.assertEqual(d2-d1, Decimal('33.3')) #with other type, left c = d1 - 5 self.assertEqual(c, Decimal('-16.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 - d1 self.assertEqual(c, Decimal('16.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 -= d2 self.assertEqual(d1, Decimal('-33.3')) #inline with other type d1 -= 5 self.assertEqual(d1, Decimal('-38.3')) def test_multiplication(self): Decimal = self.decimal.Decimal d1 = Decimal('-5') d2 = Decimal('3') #two Decimals self.assertEqual(d1*d2, Decimal('-15')) self.assertEqual(d2*d1, Decimal('-15')) #with other type, left c = d1 * 5 self.assertEqual(c, Decimal('-25')) 
self.assertEqual(type(c), type(d1)) #with other type, right c = 5 * d1 self.assertEqual(c, Decimal('-25')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 *= d2 self.assertEqual(d1, Decimal('-15')) #inline with other type d1 *= 5 self.assertEqual(d1, Decimal('-75')) def test_division(self): Decimal = self.decimal.Decimal d1 = Decimal('-5') d2 = Decimal('2') #two Decimals self.assertEqual(d1/d2, Decimal('-2.5')) self.assertEqual(d2/d1, Decimal('-0.4')) #with other type, left c = d1 / 4 self.assertEqual(c, Decimal('-1.25')) self.assertEqual(type(c), type(d1)) #with other type, right c = 4 / d1 self.assertEqual(c, Decimal('-0.8')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 /= d2 self.assertEqual(d1, Decimal('-2.5')) #inline with other type d1 /= 4 self.assertEqual(d1, Decimal('-0.625')) def test_floor_division(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1//d2, Decimal('2')) self.assertEqual(d2//d1, Decimal('0')) #with other type, left c = d1 // 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 // d1 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 //= d2 self.assertEqual(d1, Decimal('2')) #inline with other type d1 //= 2 self.assertEqual(d1, Decimal('1')) def test_powering(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1**d2, Decimal('25')) self.assertEqual(d2**d1, Decimal('32')) #with other type, left c = d1 ** 4 self.assertEqual(c, Decimal('625')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 ** d1 self.assertEqual(c, Decimal('16807')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 **= d2 self.assertEqual(d1, Decimal('25')) #inline with other type d1 **= 4 self.assertEqual(d1, Decimal('390625')) def test_module(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two 
Decimals self.assertEqual(d1%d2, Decimal('1')) self.assertEqual(d2%d1, Decimal('2')) #with other type, left c = d1 % 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 % d1 self.assertEqual(c, Decimal('2')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 %= d2 self.assertEqual(d1, Decimal('1')) #inline with other type d1 %= 4 self.assertEqual(d1, Decimal('1')) def test_floor_div_module(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals (p, q) = divmod(d1, d2) self.assertEqual(p, Decimal('2')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, left (p, q) = divmod(d1, 4) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, right (p, q) = divmod(7, d1) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('2')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) def test_unary_operators(self): Decimal = self.decimal.Decimal self.assertEqual(+Decimal(45), Decimal(+45)) # + self.assertEqual(-Decimal(45), Decimal(-45)) # - self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs def test_nan_comparisons(self): # comparisons involving signaling nans signal InvalidOperation # order comparisons (<, <=, >, >=) involving only quiet nans # also signal InvalidOperation # equality comparisons (==, !=) involving only quiet nans # don't signal, but return False or True respectively. 
Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation localcontext = self.decimal.localcontext n = Decimal('NaN') s = Decimal('sNaN') i = Decimal('Inf') f = Decimal('2') qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n) snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s) order_ops = operator.lt, operator.le, operator.gt, operator.ge equality_ops = operator.eq, operator.ne # results when InvalidOperation is not trapped for x, y in qnan_pairs + snan_pairs: for op in order_ops + equality_ops: got = op(x, y) expected = True if op is operator.ne else False self.assertIs(expected, got, "expected {0!r} for operator.{1}({2!r}, {3!r}); " "got {4!r}".format( expected, op.__name__, x, y, got)) # repeat the above, but this time trap the InvalidOperation with localcontext() as ctx: ctx.traps[InvalidOperation] = 1 for x, y in qnan_pairs: for op in equality_ops: got = op(x, y) expected = True if op is operator.ne else False self.assertIs(expected, got, "expected {0!r} for " "operator.{1}({2!r}, {3!r}); " "got {4!r}".format( expected, op.__name__, x, y, got)) for x, y in snan_pairs: for op in equality_ops: self.assertRaises(InvalidOperation, operator.eq, x, y) self.assertRaises(InvalidOperation, operator.ne, x, y) for x, y in qnan_pairs + snan_pairs: for op in order_ops: self.assertRaises(InvalidOperation, op, x, y) def test_copy_sign(self): Decimal = self.decimal.Decimal d = Decimal(1).copy_sign(Decimal(-2)) self.assertEqual(Decimal(1).copy_sign(-2), d) self.assertRaises(TypeError, Decimal(1).copy_sign, '-2') class CArithmeticOperatorsTest(ArithmeticOperatorsTest): decimal = C class PyArithmeticOperatorsTest(ArithmeticOperatorsTest): decimal = P # The following are two functions used to test threading in the next class def thfunc1(cls): Decimal = cls.decimal.Decimal InvalidOperation = cls.decimal.InvalidOperation DivisionByZero = cls.decimal.DivisionByZero Overflow = cls.decimal.Overflow Underflow = cls.decimal.Underflow Inexact = 
cls.decimal.Inexact getcontext = cls.decimal.getcontext localcontext = cls.decimal.localcontext d1 = Decimal(1) d3 = Decimal(3) test1 = d1/d3 cls.finish1.set() cls.synchro.wait() test2 = d1/d3 with localcontext() as c2: cls.assertTrue(c2.flags[Inexact]) cls.assertRaises(DivisionByZero, c2.divide, d1, 0) cls.assertTrue(c2.flags[DivisionByZero]) with localcontext() as c3: cls.assertTrue(c3.flags[Inexact]) cls.assertTrue(c3.flags[DivisionByZero]) cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN')) cls.assertTrue(c3.flags[InvalidOperation]) del c3 cls.assertFalse(c2.flags[InvalidOperation]) del c2 cls.assertEqual(test1, Decimal('0.333333333333333333333333')) cls.assertEqual(test2, Decimal('0.333333333333333333333333')) c1 = getcontext() cls.assertTrue(c1.flags[Inexact]) for sig in Overflow, Underflow, DivisionByZero, InvalidOperation: cls.assertFalse(c1.flags[sig]) def thfunc2(cls): Decimal = cls.decimal.Decimal InvalidOperation = cls.decimal.InvalidOperation DivisionByZero = cls.decimal.DivisionByZero Overflow = cls.decimal.Overflow Underflow = cls.decimal.Underflow Inexact = cls.decimal.Inexact getcontext = cls.decimal.getcontext localcontext = cls.decimal.localcontext d1 = Decimal(1) d3 = Decimal(3) test1 = d1/d3 thiscontext = getcontext() thiscontext.prec = 18 test2 = d1/d3 with localcontext() as c2: cls.assertTrue(c2.flags[Inexact]) cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999) cls.assertTrue(c2.flags[Overflow]) with localcontext(thiscontext) as c3: cls.assertTrue(c3.flags[Inexact]) cls.assertFalse(c3.flags[Overflow]) c3.traps[Underflow] = True cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999) cls.assertTrue(c3.flags[Underflow]) del c3 cls.assertFalse(c2.flags[Underflow]) cls.assertFalse(c2.traps[Underflow]) del c2 cls.synchro.set() cls.finish2.set() cls.assertEqual(test1, Decimal('0.333333333333333333333333')) cls.assertEqual(test2, Decimal('0.333333333333333333')) 
def test_threading(self):
    """Thread-local context isolation: worker threads derive their contexts
    from DefaultContext, and flag changes made by the workers must never
    leak back into the shared DefaultContext template."""
    DefaultContext = self.decimal.DefaultContext

    if self.decimal == C and not self.decimal.HAVE_THREADS:
        self.skipTest("compiled without threading")

    # DefaultContext acts as a template for fresh thread-local contexts:
    # customize it here and restore it afterwards.
    saved = (DefaultContext.prec, DefaultContext.Emax, DefaultContext.Emin)
    DefaultContext.prec = 24
    DefaultContext.Emax = 425000000
    DefaultContext.Emin = -425000000

    # Events used by thfunc1/thfunc2 to interleave their execution.
    self.synchro = threading.Event()
    self.finish1 = threading.Event()
    self.finish2 = threading.Event()

    worker1 = threading.Thread(target=thfunc1, args=(self,))
    worker2 = threading.Thread(target=thfunc2, args=(self,))
    worker1.start()
    worker2.start()

    self.finish1.wait()
    self.finish2.wait()

    # Nothing the workers did may have touched the shared template.
    for sig in Signals[self.decimal]:
        self.assertFalse(DefaultContext.flags[sig])

    (DefaultContext.prec, DefaultContext.Emax, DefaultContext.Emin) = saved
self.assertGreaterEqual(da, db) #a Decimal and an int self.assertGreater(dc, 23) self.assertLess(23, dc) self.assertEqual(dc, 45) #a Decimal and uncomparable self.assertNotEqual(da, 'ugly') self.assertNotEqual(da, 32.7) self.assertNotEqual(da, object()) self.assertNotEqual(da, object) # sortable a = list(map(Decimal, range(100))) b = a[:] random.shuffle(a) a.sort() self.assertEqual(a, b) def test_decimal_float_comparison(self): Decimal = self.decimal.Decimal da = Decimal('0.25') db = Decimal('3.0') self.assertLess(da, 3.0) self.assertLessEqual(da, 3.0) self.assertGreater(db, 0.25) self.assertGreaterEqual(db, 0.25) self.assertNotEqual(da, 1.5) self.assertEqual(da, 0.25) self.assertGreater(3.0, da) self.assertGreaterEqual(3.0, da) self.assertLess(0.25, db) self.assertLessEqual(0.25, db) self.assertNotEqual(0.25, db) self.assertEqual(3.0, db) self.assertNotEqual(0.1, Decimal('0.1')) def test_decimal_complex_comparison(self): Decimal = self.decimal.Decimal da = Decimal('0.25') db = Decimal('3.0') self.assertNotEqual(da, (1.5+0j)) self.assertNotEqual((1.5+0j), da) self.assertEqual(da, (0.25+0j)) self.assertEqual((0.25+0j), da) self.assertEqual((3.0+0j), db) self.assertEqual(db, (3.0+0j)) self.assertNotEqual(db, (3.0+1j)) self.assertNotEqual((3.0+1j), db) self.assertIs(db.__lt__(3.0+0j), NotImplemented) self.assertIs(db.__le__(3.0+0j), NotImplemented) self.assertIs(db.__gt__(3.0+0j), NotImplemented) self.assertIs(db.__le__(3.0+0j), NotImplemented) def test_decimal_fraction_comparison(self): D = self.decimal.Decimal F = fractions[self.decimal].Fraction Context = self.decimal.Context localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation emax = C.MAX_EMAX if C else 999999999 emin = C.MIN_EMIN if C else -999999999 etiny = C.MIN_ETINY if C else -1999999997 c = Context(Emax=emax, Emin=emin) with localcontext(c): c.prec = emax self.assertLess(D(0), F(1,9999999999999999999999999999999999999)) 
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0)) self.assertLess(F(0,1), D("1e" + str(etiny))) self.assertLess(D("-1e" + str(etiny)), F(0,1)) self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny))) self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999)) self.assertEqual(D("0.1"), F(1,10)) self.assertEqual(F(1,10), D("0.1")) c.prec = 300 self.assertNotEqual(D(1)/3, F(1,3)) self.assertNotEqual(F(1,3), D(1)/3) self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax))) self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999)) self.assertGreater(D('inf'), F(99999999999,123)) self.assertGreater(D('inf'), F(-99999999999,123)) self.assertLess(D('-inf'), F(99999999999,123)) self.assertLess(D('-inf'), F(-99999999999,123)) self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123)) self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan'))) self.assertNotEqual(D('nan'), F(-9,123)) self.assertNotEqual(F(-9,123), D('nan')) def test_copy_and_deepcopy_methods(self): Decimal = self.decimal.Decimal d = Decimal('43.24') c = copy.copy(d) self.assertEqual(id(c), id(d)) dc = copy.deepcopy(d) self.assertEqual(id(dc), id(d)) def test_hash_method(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext def hashit(d): a = hash(d) b = d.__hash__() self.assertEqual(a, b) return a #just that it's hashable hashit(Decimal(23)) hashit(Decimal('Infinity')) hashit(Decimal('-Infinity')) hashit(Decimal('nan123')) hashit(Decimal('-NaN')) test_values = [Decimal(sign*(2**m + n)) for m in [0, 14, 15, 16, 17, 30, 31, 32, 33, 61, 62, 63, 64, 65, 66] for n in range(-10, 10) for sign in [-1, 1]] test_values.extend([ Decimal("-1"), # ==> -2 Decimal("-0"), # zeros Decimal("0.00"), Decimal("-0.000"), Decimal("0E10"), Decimal("-0E12"), Decimal("10.0"), # negative exponent Decimal("-23.00000"), Decimal("1230E100"), # positive exponent Decimal("-4.5678E50"), # a value for which hash(n) != hash(n % (2**64-1)) # in Python 
            # pre-2.6
            Decimal(2**64 + 2**32 - 1),
            # selection of values which fail with the old (before
            # version 2.6) long.__hash__
            Decimal("1.634E100"), Decimal("90.697E100"), Decimal("188.83E100"),
            Decimal("1652.9E100"), Decimal("56531E100"),
            ])

        # check that hash(d) == hash(int(d)) for integral values
        for value in test_values:
            self.assertEqual(hashit(value), hashit(int(value)))

        #the same hash that to an int
        self.assertEqual(hashit(Decimal(23)), hashit(23))
        # hashing a signaling NaN must fail; quiet NaN/Inf are hashable
        self.assertRaises(TypeError, hash, Decimal('sNaN'))
        self.assertTrue(hashit(Decimal('Inf')))
        self.assertTrue(hashit(Decimal('-Inf')))

        # check that the hashes of a Decimal float match when they
        # represent exactly the same values
        test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                        '34.0', '2.5', '112390.625', '-0.515625']
        for s in test_strings:
            f = float(s)
            d = Decimal(s)
            self.assertEqual(hashit(f), hashit(d))

        with localcontext() as c:
            # check that the value of the hash doesn't depend on the
            # current context (issue #1757)
            x = Decimal("123456789.1")

            c.prec = 6
            h1 = hashit(x)
            c.prec = 10
            h2 = hashit(x)
            c.prec = 16
            h3 = hashit(x)

            self.assertEqual(h1, h2)
            self.assertEqual(h1, h3)

            # large integral value: hash must still agree with int's hash
            c.prec = 10000
            x = 1100 ** 1248
            self.assertEqual(hashit(Decimal(x)), hashit(x))

    def test_min_and_max_methods(self):
        # min()/max() must return one of their actual arguments (identity,
        # not just equality), for Decimal/Decimal and Decimal/int mixes.
        Decimal = self.decimal.Decimal

        d1 = Decimal('15.32')
        d2 = Decimal('28.5')
        l1 = 15
        l2 = 28

        #between Decimals
        self.assertIs(min(d1,d2), d1)
        self.assertIs(min(d2,d1), d1)
        self.assertIs(max(d1,d2), d2)
        self.assertIs(max(d2,d1), d2)

        #between Decimal and int
        self.assertIs(min(d1,l2), d1)
        self.assertIs(min(l2,d1), d1)
        self.assertIs(max(l1,d2), d2)
        self.assertIs(max(d2,l1), d2)

    def test_as_nonzero(self):
        # Truthiness: Decimal(0) is falsy, any nonzero Decimal is truthy.
        Decimal = self.decimal.Decimal

        #as false
        self.assertFalse(Decimal(0))
        #as true
        self.assertTrue(Decimal('0.372'))

    def test_tostring_methods(self):
        #Test str and repr methods.
Decimal = self.decimal.Decimal d = Decimal('15.32') self.assertEqual(str(d), '15.32') # str self.assertEqual(repr(d), "Decimal('15.32')") # repr def test_tonum_methods(self): #Test float and int methods. Decimal = self.decimal.Decimal d1 = Decimal('66') d2 = Decimal('15.32') #int self.assertEqual(int(d1), 66) self.assertEqual(int(d2), 15) #float self.assertEqual(float(d1), 66) self.assertEqual(float(d2), 15.32) #floor test_pairs = [ ('123.00', 123), ('3.2', 3), ('3.54', 3), ('3.899', 3), ('-2.3', -3), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ('89891211712379812736.1', 89891211712379812736), ] for d, i in test_pairs: self.assertEqual(math.floor(Decimal(d)), i) self.assertRaises(ValueError, math.floor, Decimal('-NaN')) self.assertRaises(ValueError, math.floor, Decimal('sNaN')) self.assertRaises(ValueError, math.floor, Decimal('NaN123')) self.assertRaises(OverflowError, math.floor, Decimal('Inf')) self.assertRaises(OverflowError, math.floor, Decimal('-Inf')) #ceiling test_pairs = [ ('123.00', 123), ('3.2', 4), ('3.54', 4), ('3.899', 4), ('-2.3', -2), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ('89891211712379812736.1', 89891211712379812737), ] for d, i in test_pairs: self.assertEqual(math.ceil(Decimal(d)), i) self.assertRaises(ValueError, math.ceil, Decimal('-NaN')) self.assertRaises(ValueError, math.ceil, Decimal('sNaN')) self.assertRaises(ValueError, math.ceil, Decimal('NaN123')) self.assertRaises(OverflowError, math.ceil, Decimal('Inf')) self.assertRaises(OverflowError, math.ceil, Decimal('-Inf')) #round, single argument test_pairs = [ ('123.00', 123), ('3.2', 3), ('3.54', 4), ('3.899', 4), ('-2.3', -2), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ('-3.5', -4), ('-2.5', -2), ('-1.5', -2), ('-0.5', 0), ('0.5', 0), ('1.5', 2), ('2.5', 2), ('3.5', 4), ] for d, i in test_pairs: self.assertEqual(round(Decimal(d)), i) self.assertRaises(ValueError, round, Decimal('-NaN')) self.assertRaises(ValueError, round, Decimal('sNaN')) self.assertRaises(ValueError, round, 
Decimal('NaN123')) self.assertRaises(OverflowError, round, Decimal('Inf')) self.assertRaises(OverflowError, round, Decimal('-Inf')) #round, two arguments; this is essentially equivalent #to quantize, which is already extensively tested test_triples = [ ('123.456', -4, '0E+4'), ('123.456', -3, '0E+3'), ('123.456', -2, '1E+2'), ('123.456', -1, '1.2E+2'), ('123.456', 0, '123'), ('123.456', 1, '123.5'), ('123.456', 2, '123.46'), ('123.456', 3, '123.456'), ('123.456', 4, '123.4560'), ('123.455', 2, '123.46'), ('123.445', 2, '123.44'), ('Inf', 4, 'NaN'), ('-Inf', -23, 'NaN'), ('sNaN314', 3, 'NaN314'), ] for d, n, r in test_triples: self.assertEqual(str(round(Decimal(d), n)), r) def test_nan_to_float(self): # Test conversions of decimal NANs to float. # See http://bugs.python.org/issue15544 Decimal = self.decimal.Decimal for s in ('nan', 'nan1234', '-nan', '-nan2468'): f = float(Decimal(s)) self.assertTrue(math.isnan(f)) sign = math.copysign(1.0, f) self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0) def test_snan_to_float(self): Decimal = self.decimal.Decimal for s in ('snan', '-snan', 'snan1357', '-snan1234'): d = Decimal(s) self.assertRaises(ValueError, float, d) def test_eval_round_trip(self): Decimal = self.decimal.Decimal #with zero d = Decimal( (0, (0,), 0) ) self.assertEqual(d, eval(repr(d))) #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(d, eval(repr(d))) #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(d, eval(repr(d))) #weird d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(d, eval(repr(d))) def test_as_tuple(self): Decimal = self.decimal.Decimal #with zero d = Decimal(0) self.assertEqual(d.as_tuple(), (0, (0,), 0) ) #int d = Decimal(-45) self.assertEqual(d.as_tuple(), (1, (4, 5), 0) ) #complicated string d = Decimal("-4.34913534E-17") self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) # The '0' coefficient is implementation specific to decimal.py. 
# It has no meaning in the C-version and is ignored there. d = Decimal("Infinity") self.assertEqual(d.as_tuple(), (0, (0,), 'F') ) #leading zeros in coefficient should be stripped d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) ) self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) ) d = Decimal( (1, (0, 0, 0), 37) ) self.assertEqual(d.as_tuple(), (1, (0,), 37)) d = Decimal( (1, (), 37) ) self.assertEqual(d.as_tuple(), (1, (0,), 37)) #leading zeros in NaN diagnostic info should be stripped d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') ) self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') ) d = Decimal( (1, (0, 0, 0), 'N') ) self.assertEqual(d.as_tuple(), (1, (), 'N') ) d = Decimal( (1, (), 'n') ) self.assertEqual(d.as_tuple(), (1, (), 'n') ) # For infinities, decimal.py has always silently accepted any # coefficient tuple. d = Decimal( (0, (0,), 'F') ) self.assertEqual(d.as_tuple(), (0, (0,), 'F')) d = Decimal( (0, (4, 5, 3, 4), 'F') ) self.assertEqual(d.as_tuple(), (0, (0,), 'F')) d = Decimal( (1, (0, 2, 7, 1), 'F') ) self.assertEqual(d.as_tuple(), (1, (0,), 'F')) def test_subclassing(self): # Different behaviours when subclassing Decimal Decimal = self.decimal.Decimal class MyDecimal(Decimal): y = None d1 = MyDecimal(1) d2 = MyDecimal(2) d = d1 + d2 self.assertIs(type(d), Decimal) d = d1.max(d2) self.assertIs(type(d), Decimal) d = copy.copy(d1) self.assertIs(type(d), MyDecimal) self.assertEqual(d, d1) d = copy.deepcopy(d1) self.assertIs(type(d), MyDecimal) self.assertEqual(d, d1) # Decimal(Decimal) d = Decimal('1.0') x = Decimal(d) self.assertIs(type(x), Decimal) self.assertEqual(x, d) # MyDecimal(Decimal) m = MyDecimal(d) self.assertIs(type(m), MyDecimal) self.assertEqual(m, d) self.assertIs(m.y, None) # Decimal(MyDecimal) x = Decimal(m) self.assertIs(type(x), Decimal) self.assertEqual(x, d) # MyDecimal(MyDecimal) m.y = 9 x = MyDecimal(m) self.assertIs(type(x), MyDecimal) self.assertEqual(x, d) self.assertIs(x.y, None) def test_implicit_context(self): Decimal 
= self.decimal.Decimal getcontext = self.decimal.getcontext # Check results when context given implicitly. (Issue 2478) c = getcontext() self.assertEqual(str(Decimal(0).sqrt()), str(c.sqrt(Decimal(0)))) def test_none_args(self): Decimal = self.decimal.Decimal Context = self.decimal.Context localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero Overflow = self.decimal.Overflow Underflow = self.decimal.Underflow Subnormal = self.decimal.Subnormal Inexact = self.decimal.Inexact Rounded = self.decimal.Rounded Clamped = self.decimal.Clamped with localcontext(Context()) as c: c.prec = 7 c.Emax = 999 c.Emin = -999 x = Decimal("111") y = Decimal("1e9999") z = Decimal("1e-9999") ##### Unary functions c.clear_flags() self.assertEqual(str(x.exp(context=None)), '1.609487E+48') self.assertTrue(c.flags[Inexact]) self.assertTrue(c.flags[Rounded]) c.clear_flags() self.assertRaises(Overflow, y.exp, context=None) self.assertTrue(c.flags[Overflow]) self.assertIs(z.is_normal(context=None), False) self.assertIs(z.is_subnormal(context=None), True) c.clear_flags() self.assertEqual(str(x.ln(context=None)), '4.709530') self.assertTrue(c.flags[Inexact]) self.assertTrue(c.flags[Rounded]) c.clear_flags() self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() self.assertEqual(str(x.log10(context=None)), '2.045323') self.assertTrue(c.flags[Inexact]) self.assertTrue(c.flags[Rounded]) c.clear_flags() self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() self.assertEqual(str(x.logb(context=None)), '2') self.assertRaises(DivisionByZero, Decimal(0).logb, context=None) self.assertTrue(c.flags[DivisionByZero]) c.clear_flags() self.assertEqual(str(x.logical_invert(context=None)), '1111000') self.assertRaises(InvalidOperation, y.logical_invert, context=None) 
self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999') self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() self.assertEqual(str(y.next_plus(context=None)), 'Infinity') self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() self.assertEqual(str(z.normalize(context=None)), '0') self.assertRaises(Overflow, y.normalize, context=None) self.assertTrue(c.flags[Overflow]) self.assertEqual(str(z.number_class(context=None)), '+Subnormal') c.clear_flags() self.assertEqual(str(z.sqrt(context=None)), '0E-1005') self.assertTrue(c.flags[Clamped]) self.assertTrue(c.flags[Inexact]) self.assertTrue(c.flags[Rounded]) self.assertTrue(c.flags[Subnormal]) self.assertTrue(c.flags[Underflow]) c.clear_flags() self.assertRaises(Overflow, y.sqrt, context=None) self.assertTrue(c.flags[Overflow]) c.capitals = 0 self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999') c.capitals = 1 ##### Binary functions c.clear_flags() ans = str(x.compare(Decimal('Nan891287828'), context=None)) self.assertEqual(ans, 'NaN1287828') self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.compare_signal(8224, context=None)) self.assertEqual(ans, '-1') self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.logical_and(101, context=None)) self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.logical_and, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.logical_or(101, context=None)) self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.logical_or, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) 
c.clear_flags() ans = str(x.logical_xor(101, context=None)) self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.max(101, context=None)) self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.max_mag(101, context=None)) self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.min(101, context=None)) self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.min_mag(101, context=None)) self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.remainder_near(101, context=None)) self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.rotate(2, context=None)) self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.rotate, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.scaleb(7, context=None)) self.assertEqual(ans, '1.11E+9') self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() ans = str(x.shift(2, context=None)) self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.shift, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) ##### Ternary functions c.clear_flags() ans = str(x.fma(2, 3, context=None)) self.assertEqual(ans, '225') self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None) 
self.assertTrue(c.flags[Overflow]) ##### Special cases c.rounding = ROUND_HALF_EVEN ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) self.assertEqual(ans, '2') c.rounding = ROUND_DOWN ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) self.assertEqual(ans, '1') ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None)) self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) self.assertEqual(ans, '2') c.rounding = ROUND_DOWN ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) self.assertEqual(ans, '1') ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None)) self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) self.assertEqual(ans, '2') c.rounding = ROUND_DOWN ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) self.assertEqual(ans, '1') ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None)) self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_UP ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) self.assertEqual(ans, '1.501') c.rounding = ROUND_DOWN ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) self.assertEqual(ans, '1.500') ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None)) self.assertEqual(ans, '1.501') c.clear_flags() self.assertRaises(InvalidOperation, 
y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None) self.assertTrue(c.flags[InvalidOperation]) with localcontext(Context()) as context: context.prec = 7 context.Emax = 999 context.Emin = -999 with localcontext(ctx=None) as c: self.assertEqual(c.prec, 7) self.assertEqual(c.Emax, 999) self.assertEqual(c.Emin, -999) def test_conversions_from_int(self): # Check that methods taking a second Decimal argument will # always accept an integer in place of a Decimal. Decimal = self.decimal.Decimal self.assertEqual(Decimal(4).compare(3), Decimal(4).compare(Decimal(3))) self.assertEqual(Decimal(4).compare_signal(3), Decimal(4).compare_signal(Decimal(3))) self.assertEqual(Decimal(4).compare_total(3), Decimal(4).compare_total(Decimal(3))) self.assertEqual(Decimal(4).compare_total_mag(3), Decimal(4).compare_total_mag(Decimal(3))) self.assertEqual(Decimal(10101).logical_and(1001), Decimal(10101).logical_and(Decimal(1001))) self.assertEqual(Decimal(10101).logical_or(1001), Decimal(10101).logical_or(Decimal(1001))) self.assertEqual(Decimal(10101).logical_xor(1001), Decimal(10101).logical_xor(Decimal(1001))) self.assertEqual(Decimal(567).max(123), Decimal(567).max(Decimal(123))) self.assertEqual(Decimal(567).max_mag(123), Decimal(567).max_mag(Decimal(123))) self.assertEqual(Decimal(567).min(123), Decimal(567).min(Decimal(123))) self.assertEqual(Decimal(567).min_mag(123), Decimal(567).min_mag(Decimal(123))) self.assertEqual(Decimal(567).next_toward(123), Decimal(567).next_toward(Decimal(123))) self.assertEqual(Decimal(1234).quantize(100), Decimal(1234).quantize(Decimal(100))) self.assertEqual(Decimal(768).remainder_near(1234), Decimal(768).remainder_near(Decimal(1234))) self.assertEqual(Decimal(123).rotate(1), Decimal(123).rotate(Decimal(1))) self.assertEqual(Decimal(1234).same_quantum(1000), Decimal(1234).same_quantum(Decimal(1000))) self.assertEqual(Decimal('9.123').scaleb(-100), Decimal('9.123').scaleb(Decimal(-100))) self.assertEqual(Decimal(456).shift(-1), 
Decimal(456).shift(Decimal(-1))) self.assertEqual(Decimal(-12).fma(Decimal(45), 67), Decimal(-12).fma(Decimal(45), Decimal(67))) self.assertEqual(Decimal(-12).fma(45, 67), Decimal(-12).fma(Decimal(45), Decimal(67))) self.assertEqual(Decimal(-12).fma(45, Decimal(67)), Decimal(-12).fma(Decimal(45), Decimal(67))) class CUsabilityTest(UsabilityTest): decimal = C class PyUsabilityTest(UsabilityTest): decimal = P class PythonAPItests(unittest.TestCase): def test_abc(self): Decimal = self.decimal.Decimal self.assertTrue(issubclass(Decimal, numbers.Number)) self.assertFalse(issubclass(Decimal, numbers.Real)) self.assertIsInstance(Decimal(0), numbers.Number) self.assertNotIsInstance(Decimal(0), numbers.Real) def test_pickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): Decimal = self.decimal.Decimal savedecimal = sys.modules['decimal'] # Round trip sys.modules['decimal'] = self.decimal d = Decimal('-3.141590000') p = pickle.dumps(d, proto) e = pickle.loads(p) self.assertEqual(d, e) if C: # Test interchangeability x = C.Decimal('-3.123e81723') y = P.Decimal('-3.123e81723') sys.modules['decimal'] = C sx = pickle.dumps(x, proto) sys.modules['decimal'] = P r = pickle.loads(sx) self.assertIsInstance(r, P.Decimal) self.assertEqual(r, y) sys.modules['decimal'] = P sy = pickle.dumps(y, proto) sys.modules['decimal'] = C r = pickle.loads(sy) self.assertIsInstance(r, C.Decimal) self.assertEqual(r, x) x = C.Decimal('-3.123e81723').as_tuple() y = P.Decimal('-3.123e81723').as_tuple() sys.modules['decimal'] = C sx = pickle.dumps(x, proto) sys.modules['decimal'] = P r = pickle.loads(sx) self.assertIsInstance(r, P.DecimalTuple) self.assertEqual(r, y) sys.modules['decimal'] = P sy = pickle.dumps(y, proto) sys.modules['decimal'] = C r = pickle.loads(sy) self.assertIsInstance(r, C.DecimalTuple) self.assertEqual(r, x) sys.modules['decimal'] = savedecimal def test_int(self): Decimal = self.decimal.Decimal for x in range(-250, 250): s = '%0.2f' % (x / 100.0) # should work the same as 
for floats self.assertEqual(int(Decimal(s)), int(float(s))) # should work the same as to_integral in the ROUND_DOWN mode d = Decimal(s) r = d.to_integral(ROUND_DOWN) self.assertEqual(Decimal(int(d)), r) self.assertRaises(ValueError, int, Decimal('-nan')) self.assertRaises(ValueError, int, Decimal('snan')) self.assertRaises(OverflowError, int, Decimal('inf')) self.assertRaises(OverflowError, int, Decimal('-inf')) def test_trunc(self): Decimal = self.decimal.Decimal for x in range(-250, 250): s = '%0.2f' % (x / 100.0) # should work the same as for floats self.assertEqual(int(Decimal(s)), int(float(s))) # should work the same as to_integral in the ROUND_DOWN mode d = Decimal(s) r = d.to_integral(ROUND_DOWN) self.assertEqual(Decimal(math.trunc(d)), r) def test_from_float(self): Decimal = self.decimal.Decimal class MyDecimal(Decimal): pass self.assertTrue(issubclass(MyDecimal, Decimal)) r = MyDecimal.from_float(0.1) self.assertEqual(type(r), MyDecimal) self.assertEqual(str(r), '0.1000000000000000055511151231257827021181583404541015625') bigint = 12345678901234567890123456789 self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint)) self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan()) self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite()) self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite()) self.assertEqual(str(MyDecimal.from_float(float('nan'))), str(Decimal('NaN'))) self.assertEqual(str(MyDecimal.from_float(float('inf'))), str(Decimal('Infinity'))) self.assertEqual(str(MyDecimal.from_float(float('-inf'))), str(Decimal('-Infinity'))) self.assertRaises(TypeError, MyDecimal.from_float, 'abc') for i in range(200): x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip def test_create_decimal_from_float(self): Decimal = self.decimal.Decimal Context = self.decimal.Context Inexact = self.decimal.Inexact context = Context(prec=5, rounding=ROUND_DOWN) 
self.assertEqual( context.create_decimal_from_float(math.pi), Decimal('3.1415') ) context = Context(prec=5, rounding=ROUND_UP) self.assertEqual( context.create_decimal_from_float(math.pi), Decimal('3.1416') ) context = Context(prec=5, traps=[Inexact]) self.assertRaises( Inexact, context.create_decimal_from_float, math.pi ) self.assertEqual(repr(context.create_decimal_from_float(-0.0)), "Decimal('-0')") self.assertEqual(repr(context.create_decimal_from_float(1.0)), "Decimal('1')") self.assertEqual(repr(context.create_decimal_from_float(10)), "Decimal('10')") def test_quantize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context InvalidOperation = self.decimal.InvalidOperation c = Context(Emax=99999, Emin=-99999) self.assertEqual( Decimal('7.335').quantize(Decimal('.01')), Decimal('7.34') ) self.assertEqual( Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN), Decimal('7.33') ) self.assertRaises( InvalidOperation, Decimal("10e99999").quantize, Decimal('1e100000'), context=c ) c = Context() d = Decimal("0.871831e800") x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN) self.assertEqual(x, Decimal('8.71E+799')) def test_complex(self): Decimal = self.decimal.Decimal x = Decimal("9.8182731e181273") self.assertEqual(x.real, x) self.assertEqual(x.imag, 0) self.assertEqual(x.conjugate(), x) x = Decimal("1") self.assertEqual(complex(x), complex(float(1))) self.assertRaises(AttributeError, setattr, x, 'real', 100) self.assertRaises(AttributeError, setattr, x, 'imag', 100) self.assertRaises(AttributeError, setattr, x, 'conjugate', 100) self.assertRaises(AttributeError, setattr, x, '__complex__', 100) def test_named_parameters(self): D = self.decimal.Decimal Context = self.decimal.Context localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation Overflow = self.decimal.Overflow xc = Context() xc.prec = 1 xc.Emax = 1 xc.Emin = -1 with localcontext() as c: c.clear_flags() self.assertEqual(D(9, xc), 9) 
self.assertEqual(D(9, context=xc), 9) self.assertEqual(D(context=xc, value=9), 9) self.assertEqual(D(context=xc), 0) xc.clear_flags() self.assertRaises(InvalidOperation, D, "xyz", context=xc) self.assertTrue(xc.flags[InvalidOperation]) self.assertFalse(c.flags[InvalidOperation]) xc.clear_flags() self.assertEqual(D(2).exp(context=xc), 7) self.assertRaises(Overflow, D(8).exp, context=xc) self.assertTrue(xc.flags[Overflow]) self.assertFalse(c.flags[Overflow]) xc.clear_flags() self.assertEqual(D(2).ln(context=xc), D('0.7')) self.assertRaises(InvalidOperation, D(-1).ln, context=xc) self.assertTrue(xc.flags[InvalidOperation]) self.assertFalse(c.flags[InvalidOperation]) self.assertEqual(D(0).log10(context=xc), D('-inf')) self.assertEqual(D(-1).next_minus(context=xc), -2) self.assertEqual(D(-1).next_plus(context=xc), D('-0.9')) self.assertEqual(D("9.73").normalize(context=xc), D('1E+1')) self.assertEqual(D("9999").to_integral(context=xc), 9999) self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000) self.assertEqual(D("123").to_integral_value(context=xc), 123) self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2')) self.assertEqual(D("0.0625").compare(context=xc, other=3), -1) xc.clear_flags() self.assertRaises(InvalidOperation, D("0").compare_signal, D('nan'), context=xc) self.assertTrue(xc.flags[InvalidOperation]) self.assertFalse(c.flags[InvalidOperation]) self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0')) self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0')) self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc), D('-0.3')) self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0')) self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc), D('0.0')) self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1')) xc.clear_flags() self.assertRaises(InvalidOperation, D("0.2").quantize, D('1e10'), context=xc) self.assertTrue(xc.flags[InvalidOperation]) self.assertFalse(c.flags[InvalidOperation]) 
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc), D('-0.5')) self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7), D('7E+1')) self.assertRaises(TypeError, D(1).is_canonical, context=xc) self.assertRaises(TypeError, D(1).is_finite, context=xc) self.assertRaises(TypeError, D(1).is_infinite, context=xc) self.assertRaises(TypeError, D(1).is_nan, context=xc) self.assertRaises(TypeError, D(1).is_qnan, context=xc) self.assertRaises(TypeError, D(1).is_snan, context=xc) self.assertRaises(TypeError, D(1).is_signed, context=xc) self.assertRaises(TypeError, D(1).is_zero, context=xc) self.assertFalse(D("0.01").is_normal(context=xc)) self.assertTrue(D("0.01").is_subnormal(context=xc)) self.assertRaises(TypeError, D(1).adjusted, context=xc) self.assertRaises(TypeError, D(1).conjugate, context=xc) self.assertRaises(TypeError, D(1).radix, context=xc) self.assertEqual(D(-111).logb(context=xc), 2) self.assertEqual(D(0).logical_invert(context=xc), 1) self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal') self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21') self.assertEqual(D('11').logical_and(D('10'), context=xc), 0) self.assertEqual(D('11').logical_or(D('10'), context=xc), 1) self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1) self.assertEqual(D('23').rotate(1, context=xc), 3) self.assertEqual(D('23').rotate(1, context=xc), 3) xc.clear_flags() self.assertRaises(Overflow, D('23').scaleb, 1, context=xc) self.assertTrue(xc.flags[Overflow]) self.assertFalse(c.flags[Overflow]) self.assertEqual(D('23').shift(-1, context=xc), 0) self.assertRaises(TypeError, D.from_float, 1.1, context=xc) self.assertRaises(TypeError, D(0).as_tuple, context=xc) self.assertEqual(D(1).canonical(), 1) self.assertRaises(TypeError, D("-1").copy_abs, context=xc) self.assertRaises(TypeError, D("-1").copy_negate, context=xc) self.assertRaises(TypeError, D(1).canonical, context="x") self.assertRaises(TypeError, D(1).canonical, xyz="x") def 
test_exception_hierarchy(self): decimal = self.decimal DecimalException = decimal.DecimalException InvalidOperation = decimal.InvalidOperation FloatOperation = decimal.FloatOperation DivisionByZero = decimal.DivisionByZero Overflow = decimal.Overflow Underflow = decimal.Underflow Subnormal = decimal.Subnormal Inexact = decimal.Inexact Rounded = decimal.Rounded Clamped = decimal.Clamped self.assertTrue(issubclass(DecimalException, ArithmeticError)) self.assertTrue(issubclass(InvalidOperation, DecimalException)) self.assertTrue(issubclass(FloatOperation, DecimalException)) self.assertTrue(issubclass(FloatOperation, TypeError)) self.assertTrue(issubclass(DivisionByZero, DecimalException)) self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError)) self.assertTrue(issubclass(Overflow, Rounded)) self.assertTrue(issubclass(Overflow, Inexact)) self.assertTrue(issubclass(Overflow, DecimalException)) self.assertTrue(issubclass(Underflow, Inexact)) self.assertTrue(issubclass(Underflow, Rounded)) self.assertTrue(issubclass(Underflow, Subnormal)) self.assertTrue(issubclass(Underflow, DecimalException)) self.assertTrue(issubclass(Subnormal, DecimalException)) self.assertTrue(issubclass(Inexact, DecimalException)) self.assertTrue(issubclass(Rounded, DecimalException)) self.assertTrue(issubclass(Clamped, DecimalException)) self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation)) self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation)) self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation)) self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError)) self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation)) class CPythonAPItests(PythonAPItests): decimal = C class PyPythonAPItests(PythonAPItests): decimal = P class ContextAPItests(unittest.TestCase): def test_none_args(self): Context = self.decimal.Context InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero 
Overflow = self.decimal.Overflow c1 = Context() c2 = Context(prec=None, rounding=None, Emax=None, Emin=None, capitals=None, clamp=None, flags=None, traps=None) for c in [c1, c2]: self.assertEqual(c.prec, 28) self.assertEqual(c.rounding, ROUND_HALF_EVEN) self.assertEqual(c.Emax, 999999) self.assertEqual(c.Emin, -999999) self.assertEqual(c.capitals, 1) self.assertEqual(c.clamp, 0) assert_signals(self, c, 'flags', []) assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero, Overflow]) @cpython_only def test_from_legacy_strings(self): import _testcapi c = self.decimal.Context() for rnd in RoundingModes: c.rounding = _testcapi.unicode_legacy_string(rnd) self.assertEqual(c.rounding, rnd) s = _testcapi.unicode_legacy_string('') self.assertRaises(TypeError, setattr, c, 'rounding', s) s = _testcapi.unicode_legacy_string('ROUND_\x00UP') self.assertRaises(TypeError, setattr, c, 'rounding', s) def test_pickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): Context = self.decimal.Context savedecimal = sys.modules['decimal'] # Round trip sys.modules['decimal'] = self.decimal c = Context() e = pickle.loads(pickle.dumps(c, proto)) self.assertEqual(c.prec, e.prec) self.assertEqual(c.Emin, e.Emin) self.assertEqual(c.Emax, e.Emax) self.assertEqual(c.rounding, e.rounding) self.assertEqual(c.capitals, e.capitals) self.assertEqual(c.clamp, e.clamp) self.assertEqual(c.flags, e.flags) self.assertEqual(c.traps, e.traps) # Test interchangeability combinations = [(C, P), (P, C)] if C else [(P, P)] for dumper, loader in combinations: for ri, _ in enumerate(RoundingModes): for fi, _ in enumerate(OrderedSignals[dumper]): for ti, _ in enumerate(OrderedSignals[dumper]): prec = random.randrange(1, 100) emin = random.randrange(-100, 0) emax = random.randrange(1, 100) caps = random.randrange(2) clamp = random.randrange(2) # One module dumps sys.modules['decimal'] = dumper c = dumper.Context( prec=prec, Emin=emin, Emax=emax, rounding=RoundingModes[ri], capitals=caps, clamp=clamp, 
flags=OrderedSignals[dumper][:fi], traps=OrderedSignals[dumper][:ti] ) s = pickle.dumps(c, proto) # The other module loads sys.modules['decimal'] = loader d = pickle.loads(s) self.assertIsInstance(d, loader.Context) self.assertEqual(d.prec, prec) self.assertEqual(d.Emin, emin) self.assertEqual(d.Emax, emax) self.assertEqual(d.rounding, RoundingModes[ri]) self.assertEqual(d.capitals, caps) self.assertEqual(d.clamp, clamp) assert_signals(self, d, 'flags', OrderedSignals[loader][:fi]) assert_signals(self, d, 'traps', OrderedSignals[loader][:ti]) sys.modules['decimal'] = savedecimal def test_equality_with_other_types(self): Decimal = self.decimal.Decimal self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}]) self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}]) def test_copy(self): # All copies should be deep Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy() self.assertNotEqual(id(c), id(d)) self.assertNotEqual(id(c.flags), id(d.flags)) self.assertNotEqual(id(c.traps), id(d.traps)) k1 = set(c.flags.keys()) k2 = set(d.flags.keys()) self.assertEqual(k1, k2) self.assertEqual(c.flags, d.flags) def test__clamp(self): # In Python 3.2, the private attribute `_clamp` was made # public (issue 8540), with the old `_clamp` becoming a # property wrapping `clamp`. For the duration of Python 3.2 # only, the attribute should be gettable/settable via both # `clamp` and `_clamp`; in Python 3.3, `_clamp` should be # removed. 
Context = self.decimal.Context c = Context() self.assertRaises(AttributeError, getattr, c, '_clamp') def test_abs(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.abs(Decimal(-1)) self.assertEqual(c.abs(-1), d) self.assertRaises(TypeError, c.abs, '-1') def test_add(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.add(Decimal(1), Decimal(1)) self.assertEqual(c.add(1, 1), d) self.assertEqual(c.add(Decimal(1), 1), d) self.assertEqual(c.add(1, Decimal(1)), d) self.assertRaises(TypeError, c.add, '1', 1) self.assertRaises(TypeError, c.add, 1, '1') def test_compare(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare(Decimal(1), Decimal(1)) self.assertEqual(c.compare(1, 1), d) self.assertEqual(c.compare(Decimal(1), 1), d) self.assertEqual(c.compare(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare, '1', 1) self.assertRaises(TypeError, c.compare, 1, '1') def test_compare_signal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_signal(Decimal(1), Decimal(1)) self.assertEqual(c.compare_signal(1, 1), d) self.assertEqual(c.compare_signal(Decimal(1), 1), d) self.assertEqual(c.compare_signal(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_signal, '1', 1) self.assertRaises(TypeError, c.compare_signal, 1, '1') def test_compare_total(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_total(Decimal(1), Decimal(1)) self.assertEqual(c.compare_total(1, 1), d) self.assertEqual(c.compare_total(Decimal(1), 1), d) self.assertEqual(c.compare_total(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_total, '1', 1) self.assertRaises(TypeError, c.compare_total, 1, '1') def test_compare_total_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_total_mag(Decimal(1), Decimal(1)) 
self.assertEqual(c.compare_total_mag(1, 1), d) self.assertEqual(c.compare_total_mag(Decimal(1), 1), d) self.assertEqual(c.compare_total_mag(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_total_mag, '1', 1) self.assertRaises(TypeError, c.compare_total_mag, 1, '1') def test_copy_abs(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_abs(Decimal(-1)) self.assertEqual(c.copy_abs(-1), d) self.assertRaises(TypeError, c.copy_abs, '-1') def test_copy_decimal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_decimal(Decimal(-1)) self.assertEqual(c.copy_decimal(-1), d) self.assertRaises(TypeError, c.copy_decimal, '-1') def test_copy_negate(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_negate(Decimal(-1)) self.assertEqual(c.copy_negate(-1), d) self.assertRaises(TypeError, c.copy_negate, '-1') def test_copy_sign(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_sign(Decimal(1), Decimal(-2)) self.assertEqual(c.copy_sign(1, -2), d) self.assertEqual(c.copy_sign(Decimal(1), -2), d) self.assertEqual(c.copy_sign(1, Decimal(-2)), d) self.assertRaises(TypeError, c.copy_sign, '1', -2) self.assertRaises(TypeError, c.copy_sign, 1, '-2') def test_divide(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divide(Decimal(1), Decimal(2)) self.assertEqual(c.divide(1, 2), d) self.assertEqual(c.divide(Decimal(1), 2), d) self.assertEqual(c.divide(1, Decimal(2)), d) self.assertRaises(TypeError, c.divide, '1', 2) self.assertRaises(TypeError, c.divide, 1, '2') def test_divide_int(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divide_int(Decimal(1), Decimal(2)) self.assertEqual(c.divide_int(1, 2), d) self.assertEqual(c.divide_int(Decimal(1), 2), d) self.assertEqual(c.divide_int(1, Decimal(2)), d) self.assertRaises(TypeError, 
c.divide_int, '1', 2) self.assertRaises(TypeError, c.divide_int, 1, '2') def test_divmod(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divmod(Decimal(1), Decimal(2)) self.assertEqual(c.divmod(1, 2), d) self.assertEqual(c.divmod(Decimal(1), 2), d) self.assertEqual(c.divmod(1, Decimal(2)), d) self.assertRaises(TypeError, c.divmod, '1', 2) self.assertRaises(TypeError, c.divmod, 1, '2') def test_exp(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.exp(Decimal(10)) self.assertEqual(c.exp(10), d) self.assertRaises(TypeError, c.exp, '10') def test_fma(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.fma(Decimal(2), Decimal(3), Decimal(4)) self.assertEqual(c.fma(2, 3, 4), d) self.assertEqual(c.fma(Decimal(2), 3, 4), d) self.assertEqual(c.fma(2, Decimal(3), 4), d) self.assertEqual(c.fma(2, 3, Decimal(4)), d) self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d) self.assertRaises(TypeError, c.fma, '2', 3, 4) self.assertRaises(TypeError, c.fma, 2, '3', 4) self.assertRaises(TypeError, c.fma, 2, 3, '4') # Issue 12079 for Context.fma ... self.assertRaises(TypeError, c.fma, Decimal('Infinity'), Decimal(0), "not a decimal") self.assertRaises(TypeError, c.fma, Decimal(1), Decimal('snan'), 1.222) # ... and for Decimal.fma. 
self.assertRaises(TypeError, Decimal('Infinity').fma, Decimal(0), "not a decimal") self.assertRaises(TypeError, Decimal(1).fma, Decimal('snan'), 1.222) def test_is_finite(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_finite(Decimal(10)) self.assertEqual(c.is_finite(10), d) self.assertRaises(TypeError, c.is_finite, '10') def test_is_infinite(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_infinite(Decimal(10)) self.assertEqual(c.is_infinite(10), d) self.assertRaises(TypeError, c.is_infinite, '10') def test_is_nan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_nan(Decimal(10)) self.assertEqual(c.is_nan(10), d) self.assertRaises(TypeError, c.is_nan, '10') def test_is_normal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_normal(Decimal(10)) self.assertEqual(c.is_normal(10), d) self.assertRaises(TypeError, c.is_normal, '10') def test_is_qnan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_qnan(Decimal(10)) self.assertEqual(c.is_qnan(10), d) self.assertRaises(TypeError, c.is_qnan, '10') def test_is_signed(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_signed(Decimal(10)) self.assertEqual(c.is_signed(10), d) self.assertRaises(TypeError, c.is_signed, '10') def test_is_snan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_snan(Decimal(10)) self.assertEqual(c.is_snan(10), d) self.assertRaises(TypeError, c.is_snan, '10') def test_is_subnormal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_subnormal(Decimal(10)) self.assertEqual(c.is_subnormal(10), d) self.assertRaises(TypeError, c.is_subnormal, '10') def test_is_zero(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_zero(Decimal(10)) 
self.assertEqual(c.is_zero(10), d) self.assertRaises(TypeError, c.is_zero, '10') def test_ln(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.ln(Decimal(10)) self.assertEqual(c.ln(10), d) self.assertRaises(TypeError, c.ln, '10') def test_log10(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.log10(Decimal(10)) self.assertEqual(c.log10(10), d) self.assertRaises(TypeError, c.log10, '10') def test_logb(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logb(Decimal(10)) self.assertEqual(c.logb(10), d) self.assertRaises(TypeError, c.logb, '10') def test_logical_and(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_and(Decimal(1), Decimal(1)) self.assertEqual(c.logical_and(1, 1), d) self.assertEqual(c.logical_and(Decimal(1), 1), d) self.assertEqual(c.logical_and(1, Decimal(1)), d) self.assertRaises(TypeError, c.logical_and, '1', 1) self.assertRaises(TypeError, c.logical_and, 1, '1') def test_logical_invert(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_invert(Decimal(1000)) self.assertEqual(c.logical_invert(1000), d) self.assertRaises(TypeError, c.logical_invert, '1000') def test_logical_or(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_or(Decimal(1), Decimal(1)) self.assertEqual(c.logical_or(1, 1), d) self.assertEqual(c.logical_or(Decimal(1), 1), d) self.assertEqual(c.logical_or(1, Decimal(1)), d) self.assertRaises(TypeError, c.logical_or, '1', 1) self.assertRaises(TypeError, c.logical_or, 1, '1') def test_logical_xor(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_xor(Decimal(1), Decimal(1)) self.assertEqual(c.logical_xor(1, 1), d) self.assertEqual(c.logical_xor(Decimal(1), 1), d) self.assertEqual(c.logical_xor(1, Decimal(1)), d) self.assertRaises(TypeError, 
c.logical_xor, '1', 1) self.assertRaises(TypeError, c.logical_xor, 1, '1') def test_max(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.max(Decimal(1), Decimal(2)) self.assertEqual(c.max(1, 2), d) self.assertEqual(c.max(Decimal(1), 2), d) self.assertEqual(c.max(1, Decimal(2)), d) self.assertRaises(TypeError, c.max, '1', 2) self.assertRaises(TypeError, c.max, 1, '2') def test_max_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.max_mag(Decimal(1), Decimal(2)) self.assertEqual(c.max_mag(1, 2), d) self.assertEqual(c.max_mag(Decimal(1), 2), d) self.assertEqual(c.max_mag(1, Decimal(2)), d) self.assertRaises(TypeError, c.max_mag, '1', 2) self.assertRaises(TypeError, c.max_mag, 1, '2') def test_min(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.min(Decimal(1), Decimal(2)) self.assertEqual(c.min(1, 2), d) self.assertEqual(c.min(Decimal(1), 2), d) self.assertEqual(c.min(1, Decimal(2)), d) self.assertRaises(TypeError, c.min, '1', 2) self.assertRaises(TypeError, c.min, 1, '2') def test_min_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.min_mag(Decimal(1), Decimal(2)) self.assertEqual(c.min_mag(1, 2), d) self.assertEqual(c.min_mag(Decimal(1), 2), d) self.assertEqual(c.min_mag(1, Decimal(2)), d) self.assertRaises(TypeError, c.min_mag, '1', 2) self.assertRaises(TypeError, c.min_mag, 1, '2') def test_minus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.minus(Decimal(10)) self.assertEqual(c.minus(10), d) self.assertRaises(TypeError, c.minus, '10') def test_multiply(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.multiply(Decimal(1), Decimal(2)) self.assertEqual(c.multiply(1, 2), d) self.assertEqual(c.multiply(Decimal(1), 2), d) self.assertEqual(c.multiply(1, Decimal(2)), d) self.assertRaises(TypeError, c.multiply, '1', 2) 
self.assertRaises(TypeError, c.multiply, 1, '2') def test_next_minus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.next_minus(Decimal(10)) self.assertEqual(c.next_minus(10), d) self.assertRaises(TypeError, c.next_minus, '10') def test_next_plus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.next_plus(Decimal(10)) self.assertEqual(c.next_plus(10), d) self.assertRaises(TypeError, c.next_plus, '10') def test_next_toward(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.next_toward(Decimal(1), Decimal(2)) self.assertEqual(c.next_toward(1, 2), d) self.assertEqual(c.next_toward(Decimal(1), 2), d) self.assertEqual(c.next_toward(1, Decimal(2)), d) self.assertRaises(TypeError, c.next_toward, '1', 2) self.assertRaises(TypeError, c.next_toward, 1, '2') def test_normalize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.normalize(Decimal(10)) self.assertEqual(c.normalize(10), d) self.assertRaises(TypeError, c.normalize, '10') def test_number_class(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() self.assertEqual(c.number_class(123), c.number_class(Decimal(123))) self.assertEqual(c.number_class(0), c.number_class(Decimal(0))) self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45))) def test_plus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.plus(Decimal(10)) self.assertEqual(c.plus(10), d) self.assertRaises(TypeError, c.plus, '10') def test_power(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.power(Decimal(1), Decimal(4)) self.assertEqual(c.power(1, 4), d) self.assertEqual(c.power(Decimal(1), 4), d) self.assertEqual(c.power(1, Decimal(4)), d) self.assertEqual(c.power(Decimal(1), Decimal(4)), d) self.assertRaises(TypeError, c.power, '1', 4) self.assertRaises(TypeError, c.power, 1, '4') 
self.assertEqual(c.power(modulo=5, b=8, a=2), 1) def test_quantize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.quantize(Decimal(1), Decimal(2)) self.assertEqual(c.quantize(1, 2), d) self.assertEqual(c.quantize(Decimal(1), 2), d) self.assertEqual(c.quantize(1, Decimal(2)), d) self.assertRaises(TypeError, c.quantize, '1', 2) self.assertRaises(TypeError, c.quantize, 1, '2') def test_remainder(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.remainder(Decimal(1), Decimal(2)) self.assertEqual(c.remainder(1, 2), d) self.assertEqual(c.remainder(Decimal(1), 2), d) self.assertEqual(c.remainder(1, Decimal(2)), d) self.assertRaises(TypeError, c.remainder, '1', 2) self.assertRaises(TypeError, c.remainder, 1, '2') def test_remainder_near(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.remainder_near(Decimal(1), Decimal(2)) self.assertEqual(c.remainder_near(1, 2), d) self.assertEqual(c.remainder_near(Decimal(1), 2), d) self.assertEqual(c.remainder_near(1, Decimal(2)), d) self.assertRaises(TypeError, c.remainder_near, '1', 2) self.assertRaises(TypeError, c.remainder_near, 1, '2') def test_rotate(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.rotate(Decimal(1), Decimal(2)) self.assertEqual(c.rotate(1, 2), d) self.assertEqual(c.rotate(Decimal(1), 2), d) self.assertEqual(c.rotate(1, Decimal(2)), d) self.assertRaises(TypeError, c.rotate, '1', 2) self.assertRaises(TypeError, c.rotate, 1, '2') def test_sqrt(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.sqrt(Decimal(10)) self.assertEqual(c.sqrt(10), d) self.assertRaises(TypeError, c.sqrt, '10') def test_same_quantum(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.same_quantum(Decimal(1), Decimal(2)) self.assertEqual(c.same_quantum(1, 2), d) self.assertEqual(c.same_quantum(Decimal(1), 2), 
d) self.assertEqual(c.same_quantum(1, Decimal(2)), d) self.assertRaises(TypeError, c.same_quantum, '1', 2) self.assertRaises(TypeError, c.same_quantum, 1, '2') def test_scaleb(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.scaleb(Decimal(1), Decimal(2)) self.assertEqual(c.scaleb(1, 2), d) self.assertEqual(c.scaleb(Decimal(1), 2), d) self.assertEqual(c.scaleb(1, Decimal(2)), d) self.assertRaises(TypeError, c.scaleb, '1', 2) self.assertRaises(TypeError, c.scaleb, 1, '2') def test_shift(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.shift(Decimal(1), Decimal(2)) self.assertEqual(c.shift(1, 2), d) self.assertEqual(c.shift(Decimal(1), 2), d) self.assertEqual(c.shift(1, Decimal(2)), d) self.assertRaises(TypeError, c.shift, '1', 2) self.assertRaises(TypeError, c.shift, 1, '2') def test_subtract(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.subtract(Decimal(1), Decimal(2)) self.assertEqual(c.subtract(1, 2), d) self.assertEqual(c.subtract(Decimal(1), 2), d) self.assertEqual(c.subtract(1, Decimal(2)), d) self.assertRaises(TypeError, c.subtract, '1', 2) self.assertRaises(TypeError, c.subtract, 1, '2') def test_to_eng_string(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_eng_string(Decimal(10)) self.assertEqual(c.to_eng_string(10), d) self.assertRaises(TypeError, c.to_eng_string, '10') def test_to_sci_string(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_sci_string(Decimal(10)) self.assertEqual(c.to_sci_string(10), d) self.assertRaises(TypeError, c.to_sci_string, '10') def test_to_integral_exact(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_integral_exact(Decimal(10)) self.assertEqual(c.to_integral_exact(10), d) self.assertRaises(TypeError, c.to_integral_exact, '10') def test_to_integral_value(self): Decimal = 
self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_integral_value(Decimal(10)) self.assertEqual(c.to_integral_value(10), d) self.assertRaises(TypeError, c.to_integral_value, '10') self.assertRaises(TypeError, c.to_integral_value, 10, 'x') class CContextAPItests(ContextAPItests): decimal = C class PyContextAPItests(ContextAPItests): decimal = P class ContextWithStatement(unittest.TestCase): # Can't do these as docstrings until Python 2.6 # as doctest can't handle __future__ statements def test_localcontext(self): # Use a copy of the current context in the block getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext orig_ctx = getcontext() with localcontext() as enter_ctx: set_ctx = getcontext() final_ctx = getcontext() self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly') self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context') self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context') def test_localcontextarg(self): # Use a copy of the supplied context in the block Context = self.decimal.Context getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext localcontext = self.decimal.localcontext orig_ctx = getcontext() new_ctx = Context(prec=42) with localcontext(new_ctx) as enter_ctx: set_ctx = getcontext() final_ctx = getcontext() self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly') self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context') self.assertIsNot(new_ctx, set_ctx, 'did not copy the context') self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context') def test_nested_with_statements(self): # Use a copy of the supplied context in the block Decimal = self.decimal.Decimal Context = self.decimal.Context getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext Clamped = self.decimal.Clamped Overflow = self.decimal.Overflow orig_ctx = getcontext() orig_ctx.clear_flags() new_ctx = 
Context(Emax=384) with localcontext() as c1: self.assertEqual(c1.flags, orig_ctx.flags) self.assertEqual(c1.traps, orig_ctx.traps) c1.traps[Clamped] = True c1.Emin = -383 self.assertNotEqual(orig_ctx.Emin, -383) self.assertRaises(Clamped, c1.create_decimal, '0e-999') self.assertTrue(c1.flags[Clamped]) with localcontext(new_ctx) as c2: self.assertEqual(c2.flags, new_ctx.flags) self.assertEqual(c2.traps, new_ctx.traps) self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2) self.assertFalse(c2.flags[Clamped]) self.assertTrue(c2.flags[Overflow]) del c2 self.assertFalse(c1.flags[Overflow]) del c1 self.assertNotEqual(orig_ctx.Emin, -383) self.assertFalse(orig_ctx.flags[Clamped]) self.assertFalse(orig_ctx.flags[Overflow]) self.assertFalse(new_ctx.flags[Clamped]) self.assertFalse(new_ctx.flags[Overflow]) def test_with_statements_gc1(self): localcontext = self.decimal.localcontext with localcontext() as c1: del c1 with localcontext() as c2: del c2 with localcontext() as c3: del c3 with localcontext() as c4: del c4 def test_with_statements_gc2(self): localcontext = self.decimal.localcontext with localcontext() as c1: with localcontext(c1) as c2: del c1 with localcontext(c2) as c3: del c2 with localcontext(c3) as c4: del c3 del c4 def test_with_statements_gc3(self): Context = self.decimal.Context localcontext = self.decimal.localcontext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext with localcontext() as c1: del c1 n1 = Context(prec=1) setcontext(n1) with localcontext(n1) as c2: del n1 self.assertEqual(c2.prec, 1) del c2 n2 = Context(prec=2) setcontext(n2) del n2 self.assertEqual(getcontext().prec, 2) n3 = Context(prec=3) setcontext(n3) self.assertEqual(getcontext().prec, 3) with localcontext(n3) as c3: del n3 self.assertEqual(c3.prec, 3) del c3 n4 = Context(prec=4) setcontext(n4) del n4 self.assertEqual(getcontext().prec, 4) with localcontext() as c4: self.assertEqual(c4.prec, 4) del c4 class CContextWithStatement(ContextWithStatement): 
decimal = C class PyContextWithStatement(ContextWithStatement): decimal = P class ContextFlags(unittest.TestCase): def test_flags_irrelevant(self): # check that the result (numeric result + flags raised) of an # arithmetic operation doesn't depend on the current flags Decimal = self.decimal.Decimal Context = self.decimal.Context Inexact = self.decimal.Inexact Rounded = self.decimal.Rounded Underflow = self.decimal.Underflow Clamped = self.decimal.Clamped Subnormal = self.decimal.Subnormal def raise_error(context, flag): if self.decimal == C: context.flags[flag] = True if context.traps[flag]: raise flag else: context._raise_error(flag) context = Context(prec=9, Emin = -425000000, Emax = 425000000, rounding=ROUND_HALF_EVEN, traps=[], flags=[]) # operations that raise various flags, in the form (function, arglist) operations = [ (context._apply, [Decimal("100E-425000010")]), (context.sqrt, [Decimal(2)]), (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]), (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]), (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]), ] # try various flags individually, then a whole lot at once flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal], [Inexact, Rounded, Underflow, Clamped, Subnormal]] for fn, args in operations: # find answer and flags raised using a clean context context.clear_flags() ans = fn(*args) flags = [k for k, v in context.flags.items() if v] for extra_flags in flagsets: # set flags, before calling operation context.clear_flags() for flag in extra_flags: raise_error(context, flag) new_ans = fn(*args) # flags that we expect to be set after the operation expected_flags = list(flags) for flag in extra_flags: if flag not in expected_flags: expected_flags.append(flag) expected_flags.sort(key=id) # flags we actually got new_flags = [k for k,v in context.flags.items() if v] new_flags.sort(key=id) self.assertEqual(ans, new_ans, "operation produces different answers 
depending on flags set: " + "expected %s, got %s." % (ans, new_ans)) self.assertEqual(new_flags, expected_flags, "operation raises different flags depending on flags set: " + "expected %s, got %s" % (expected_flags, new_flags)) def test_flag_comparisons(self): Context = self.decimal.Context Inexact = self.decimal.Inexact Rounded = self.decimal.Rounded c = Context() # Valid SignalDict self.assertNotEqual(c.flags, c.traps) self.assertNotEqual(c.traps, c.flags) c.flags = c.traps self.assertEqual(c.flags, c.traps) self.assertEqual(c.traps, c.flags) c.flags[Rounded] = True c.traps = c.flags self.assertEqual(c.flags, c.traps) self.assertEqual(c.traps, c.flags) d = {} d.update(c.flags) self.assertEqual(d, c.flags) self.assertEqual(c.flags, d) d[Inexact] = True self.assertNotEqual(d, c.flags) self.assertNotEqual(c.flags, d) # Invalid SignalDict d = {Inexact:False} self.assertNotEqual(d, c.flags) self.assertNotEqual(c.flags, d) d = ["xyz"] self.assertNotEqual(d, c.flags) self.assertNotEqual(c.flags, d) @requires_IEEE_754 def test_float_operation(self): Decimal = self.decimal.Decimal FloatOperation = self.decimal.FloatOperation localcontext = self.decimal.localcontext with localcontext() as c: ##### trap is off by default self.assertFalse(c.traps[FloatOperation]) # implicit conversion sets the flag c.clear_flags() self.assertEqual(Decimal(7.5), 7.5) self.assertTrue(c.flags[FloatOperation]) c.clear_flags() self.assertEqual(c.create_decimal(7.5), 7.5) self.assertTrue(c.flags[FloatOperation]) # explicit conversion does not set the flag c.clear_flags() x = Decimal.from_float(7.5) self.assertFalse(c.flags[FloatOperation]) # comparison sets the flag self.assertEqual(x, 7.5) self.assertTrue(c.flags[FloatOperation]) c.clear_flags() x = c.create_decimal_from_float(7.5) self.assertFalse(c.flags[FloatOperation]) self.assertEqual(x, 7.5) self.assertTrue(c.flags[FloatOperation]) ##### set the trap c.traps[FloatOperation] = True # implicit conversion raises c.clear_flags() 
self.assertRaises(FloatOperation, Decimal, 7.5) self.assertTrue(c.flags[FloatOperation]) c.clear_flags() self.assertRaises(FloatOperation, c.create_decimal, 7.5) self.assertTrue(c.flags[FloatOperation]) # explicit conversion is silent c.clear_flags() x = Decimal.from_float(7.5) self.assertFalse(c.flags[FloatOperation]) c.clear_flags() x = c.create_decimal_from_float(7.5) self.assertFalse(c.flags[FloatOperation]) def test_float_comparison(self): Decimal = self.decimal.Decimal Context = self.decimal.Context FloatOperation = self.decimal.FloatOperation localcontext = self.decimal.localcontext def assert_attr(a, b, attr, context, signal=None): context.clear_flags() f = getattr(a, attr) if signal == FloatOperation: self.assertRaises(signal, f, b) else: self.assertIs(f(b), True) self.assertTrue(context.flags[FloatOperation]) small_d = Decimal('0.25') big_d = Decimal('3.0') small_f = 0.25 big_f = 3.0 zero_d = Decimal('0.0') neg_zero_d = Decimal('-0.0') zero_f = 0.0 neg_zero_f = -0.0 inf_d = Decimal('Infinity') neg_inf_d = Decimal('-Infinity') inf_f = float('inf') neg_inf_f = float('-inf') def doit(c, signal=None): # Order for attr in '__lt__', '__le__': assert_attr(small_d, big_f, attr, c, signal) for attr in '__gt__', '__ge__': assert_attr(big_d, small_f, attr, c, signal) # Equality assert_attr(small_d, small_f, '__eq__', c, None) assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None) assert_attr(neg_zero_d, zero_f, '__eq__', c, None) assert_attr(zero_d, neg_zero_f, '__eq__', c, None) assert_attr(zero_d, zero_f, '__eq__', c, None) assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None) assert_attr(inf_d, inf_f, '__eq__', c, None) # Inequality assert_attr(small_d, big_f, '__ne__', c, None) assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None) assert_attr(neg_inf_d, inf_f, '__ne__', c, None) assert_attr(inf_d, neg_inf_f, '__ne__', c, None) assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None) def test_containers(c, signal=None): c.clear_flags() s = set([100.0, 
Decimal('100.0')]) self.assertEqual(len(s), 1) self.assertTrue(c.flags[FloatOperation]) c.clear_flags() if signal: self.assertRaises(signal, sorted, [1.0, Decimal('10.0')]) else: s = sorted([10.0, Decimal('10.0')]) self.assertTrue(c.flags[FloatOperation]) c.clear_flags() b = 10.0 in [Decimal('10.0'), 1.0] self.assertTrue(c.flags[FloatOperation]) c.clear_flags() b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'} self.assertTrue(c.flags[FloatOperation]) nc = Context() with localcontext(nc) as c: self.assertFalse(c.traps[FloatOperation]) doit(c, signal=None) test_containers(c, signal=None) c.traps[FloatOperation] = True doit(c, signal=FloatOperation) test_containers(c, signal=FloatOperation) def test_float_operation_default(self): Decimal = self.decimal.Decimal Context = self.decimal.Context Inexact = self.decimal.Inexact FloatOperation= self.decimal.FloatOperation context = Context() self.assertFalse(context.flags[FloatOperation]) self.assertFalse(context.traps[FloatOperation]) context.clear_traps() context.traps[Inexact] = True context.traps[FloatOperation] = True self.assertTrue(context.traps[FloatOperation]) self.assertTrue(context.traps[Inexact]) class CContextFlags(ContextFlags): decimal = C class PyContextFlags(ContextFlags): decimal = P class SpecialContexts(unittest.TestCase): """Test the context templates.""" def test_context_templates(self): BasicContext = self.decimal.BasicContext ExtendedContext = self.decimal.ExtendedContext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero Overflow = self.decimal.Overflow Underflow = self.decimal.Underflow Clamped = self.decimal.Clamped assert_signals(self, BasicContext, 'traps', [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped] ) savecontext = getcontext().copy() basic_context_prec = BasicContext.prec extended_context_prec = ExtendedContext.prec ex = None try: BasicContext.prec = 
ExtendedContext.prec = 441 for template in BasicContext, ExtendedContext: setcontext(template) c = getcontext() self.assertIsNot(c, template) self.assertEqual(c.prec, 441) except Exception as e: ex = e.__class__ finally: BasicContext.prec = basic_context_prec ExtendedContext.prec = extended_context_prec setcontext(savecontext) if ex: raise ex def test_default_context(self): DefaultContext = self.decimal.DefaultContext BasicContext = self.decimal.BasicContext ExtendedContext = self.decimal.ExtendedContext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero Overflow = self.decimal.Overflow self.assertEqual(BasicContext.prec, 9) self.assertEqual(ExtendedContext.prec, 9) assert_signals(self, DefaultContext, 'traps', [InvalidOperation, DivisionByZero, Overflow] ) savecontext = getcontext().copy() default_context_prec = DefaultContext.prec ex = None try: c = getcontext() saveprec = c.prec DefaultContext.prec = 961 c = getcontext() self.assertEqual(c.prec, saveprec) setcontext(DefaultContext) c = getcontext() self.assertIsNot(c, DefaultContext) self.assertEqual(c.prec, 961) except Exception as e: ex = e.__class__ finally: DefaultContext.prec = default_context_prec setcontext(savecontext) if ex: raise ex class CSpecialContexts(SpecialContexts): decimal = C class PySpecialContexts(SpecialContexts): decimal = P class ContextInputValidation(unittest.TestCase): def test_invalid_context(self): Context = self.decimal.Context DefaultContext = self.decimal.DefaultContext c = DefaultContext.copy() # prec, Emax for attr in ['prec', 'Emax']: setattr(c, attr, 999999) self.assertEqual(getattr(c, attr), 999999) self.assertRaises(ValueError, setattr, c, attr, -1) self.assertRaises(TypeError, setattr, c, attr, 'xyz') # Emin setattr(c, 'Emin', -999999) self.assertEqual(getattr(c, 'Emin'), -999999) self.assertRaises(ValueError, setattr, c, 'Emin', 1) 
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3)) self.assertRaises(TypeError, setattr, c, 'rounding', -1) self.assertRaises(TypeError, setattr, c, 'rounding', 9) self.assertRaises(TypeError, setattr, c, 'rounding', 1.0) self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz') # capitals, clamp for attr in ['capitals', 'clamp']: self.assertRaises(ValueError, setattr, c, attr, -1) self.assertRaises(ValueError, setattr, c, attr, 2) self.assertRaises(TypeError, setattr, c, attr, [1,2,3]) # Invalid attribute self.assertRaises(AttributeError, setattr, c, 'emax', 100) # Invalid signal dict self.assertRaises(TypeError, setattr, c, 'flags', []) self.assertRaises(KeyError, setattr, c, 'flags', {}) self.assertRaises(KeyError, setattr, c, 'traps', {'InvalidOperation':0}) # Attributes cannot be deleted for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp', 'flags', 'traps']: self.assertRaises(AttributeError, c.__delattr__, attr) # Invalid attributes self.assertRaises(TypeError, getattr, c, 9) self.assertRaises(TypeError, setattr, c, 9) # Invalid values in constructor self.assertRaises(TypeError, Context, rounding=999999) self.assertRaises(TypeError, Context, rounding='xyz') self.assertRaises(ValueError, Context, clamp=2) self.assertRaises(ValueError, Context, capitals=-1) self.assertRaises(KeyError, Context, flags=["P"]) self.assertRaises(KeyError, Context, traps=["Q"]) # Type error in conversion self.assertRaises(TypeError, Context, flags=(0,1)) self.assertRaises(TypeError, Context, traps=(1,0)) class CContextInputValidation(ContextInputValidation): decimal = C class PyContextInputValidation(ContextInputValidation): decimal = P class ContextSubclassing(unittest.TestCase): def test_context_subclassing(self): decimal = self.decimal Decimal = decimal.Decimal Context = decimal.Context Clamped = decimal.Clamped DivisionByZero = decimal.DivisionByZero Inexact = decimal.Inexact Overflow = decimal.Overflow Rounded = decimal.Rounded Subnormal = 
decimal.Subnormal Underflow = decimal.Underflow InvalidOperation = decimal.InvalidOperation class MyContext(Context): def __init__(self, prec=None, rounding=None, Emin=None, Emax=None, capitals=None, clamp=None, flags=None, traps=None): Context.__init__(self) if prec is not None: self.prec = prec if rounding is not None: self.rounding = rounding if Emin is not None: self.Emin = Emin if Emax is not None: self.Emax = Emax if capitals is not None: self.capitals = capitals if clamp is not None: self.clamp = clamp if flags is not None: if isinstance(flags, list): flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags} self.flags = flags if traps is not None: if isinstance(traps, list): traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps} self.traps = traps c = Context() d = MyContext() for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp', 'flags', 'traps'): self.assertEqual(getattr(c, attr), getattr(d, attr)) # prec self.assertRaises(ValueError, MyContext, **{'prec':-1}) c = MyContext(prec=1) self.assertEqual(c.prec, 1) self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0) # rounding self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'}) c = MyContext(rounding=ROUND_DOWN, prec=1) self.assertEqual(c.rounding, ROUND_DOWN) self.assertEqual(c.plus(Decimal('9.9')), 9) # Emin self.assertRaises(ValueError, MyContext, **{'Emin':5}) c = MyContext(Emin=-1, prec=1) self.assertEqual(c.Emin, -1) x = c.add(Decimal('1e-99'), Decimal('2.234e-2000')) self.assertEqual(x, Decimal('0.0')) for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped): self.assertTrue(c.flags[signal]) # Emax self.assertRaises(ValueError, MyContext, **{'Emax':-1}) c = MyContext(Emax=1, prec=1) self.assertEqual(c.Emax, 1) self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000')) if self.decimal == C: for signal in (Inexact, Overflow, Rounded): self.assertTrue(c.flags[signal]) # capitals self.assertRaises(ValueError, MyContext, 
**{'capitals':-1}) c = MyContext(capitals=0) self.assertEqual(c.capitals, 0) x = c.create_decimal('1E222') self.assertEqual(c.to_sci_string(x), '1e+222') # clamp self.assertRaises(ValueError, MyContext, **{'clamp':2}) c = MyContext(clamp=1, Emax=99) self.assertEqual(c.clamp, 1) x = c.plus(Decimal('1e99')) self.assertEqual(str(x), '1.000000000000000000000000000E+99') # flags self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'}) c = MyContext(flags=[Rounded, DivisionByZero]) for signal in (Rounded, DivisionByZero): self.assertTrue(c.flags[signal]) c.clear_flags() for signal in OrderedSignals[decimal]: self.assertFalse(c.flags[signal]) # traps self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'}) c = MyContext(traps=[Rounded, DivisionByZero]) for signal in (Rounded, DivisionByZero): self.assertTrue(c.traps[signal]) c.clear_traps() for signal in OrderedSignals[decimal]: self.assertFalse(c.traps[signal]) class CContextSubclassing(ContextSubclassing): decimal = C class PyContextSubclassing(ContextSubclassing): decimal = P @skip_if_extra_functionality class CheckAttributes(unittest.TestCase): def test_module_attributes(self): # Architecture dependent context limits self.assertEqual(C.MAX_PREC, P.MAX_PREC) self.assertEqual(C.MAX_EMAX, P.MAX_EMAX) self.assertEqual(C.MIN_EMIN, P.MIN_EMIN) self.assertEqual(C.MIN_ETINY, P.MIN_ETINY) self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False) self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False) self.assertEqual(C.__version__, P.__version__) self.assertEqual(dir(C), dir(P)) def test_context_attributes(self): x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')] y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')] self.assertEqual(set(x) - set(y), set()) def test_decimal_attributes(self): x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')] y = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')] self.assertEqual(set(x) - 
set(y), set()) class Coverage(unittest.TestCase): def test_adjusted(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal('1234e9999').adjusted(), 10002) # XXX raise? self.assertEqual(Decimal('nan').adjusted(), 0) self.assertEqual(Decimal('inf').adjusted(), 0) def test_canonical(self): Decimal = self.decimal.Decimal getcontext = self.decimal.getcontext x = Decimal(9).canonical() self.assertEqual(x, 9) c = getcontext() x = c.canonical(Decimal(9)) self.assertEqual(x, 9) def test_context_repr(self): c = self.decimal.DefaultContext.copy() c.prec = 425000000 c.Emax = 425000000 c.Emin = -425000000 c.rounding = ROUND_HALF_DOWN c.capitals = 0 c.clamp = 1 for sig in OrderedSignals[self.decimal]: c.flags[sig] = False c.traps[sig] = False s = c.__repr__() t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \ "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \ "flags=[], traps=[])" self.assertEqual(s, t) def test_implicit_context(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext with localcontext() as c: c.prec = 1 c.Emax = 1 c.Emin = -1 # abs self.assertEqual(abs(Decimal("-10")), 10) # add self.assertEqual(Decimal("7") + 1, 8) # divide self.assertEqual(Decimal("10") / 5, 2) # divide_int self.assertEqual(Decimal("10") // 7, 1) # fma self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1) self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True) # three arg power self.assertEqual(pow(Decimal(10), 2, 7), 2) # exp self.assertEqual(Decimal("1.01").exp(), 3) # is_normal self.assertIs(Decimal("0.01").is_normal(), False) # is_subnormal self.assertIs(Decimal("0.01").is_subnormal(), True) # ln self.assertEqual(Decimal("20").ln(), 3) # log10 self.assertEqual(Decimal("20").log10(), 1) # logb self.assertEqual(Decimal("580").logb(), 2) # logical_invert self.assertEqual(Decimal("10").logical_invert(), 1) # minus self.assertEqual(-Decimal("-10"), 10) # multiply self.assertEqual(Decimal("2") * 4, 8) # next_minus 
self.assertEqual(Decimal("10").next_minus(), 9) # next_plus self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1')) # normalize self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1')) # number_class self.assertEqual(Decimal("10").number_class(), '+Normal') # plus self.assertEqual(+Decimal("-1"), -1) # remainder self.assertEqual(Decimal("10") % 7, 3) # subtract self.assertEqual(Decimal("10") - 7, 3) # to_integral_exact self.assertEqual(Decimal("1.12345").to_integral_exact(), 1) # Boolean functions self.assertTrue(Decimal("1").is_canonical()) self.assertTrue(Decimal("1").is_finite()) self.assertTrue(Decimal("1").is_finite()) self.assertTrue(Decimal("snan").is_snan()) self.assertTrue(Decimal("-1").is_signed()) self.assertTrue(Decimal("0").is_zero()) self.assertTrue(Decimal("0").is_zero()) # Copy with localcontext() as c: c.prec = 10000 x = 1228 ** 1523 y = -Decimal(x) z = y.copy_abs() self.assertEqual(z, x) z = y.copy_negate() self.assertEqual(z, x) z = y.copy_sign(Decimal(1)) self.assertEqual(z, x) def test_divmod(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero with localcontext() as c: q, r = divmod(Decimal("10912837129"), 1001) self.assertEqual(q, Decimal('10901935')) self.assertEqual(r, Decimal('194')) q, r = divmod(Decimal("NaN"), 7) self.assertTrue(q.is_nan() and r.is_nan()) c.traps[InvalidOperation] = False q, r = divmod(Decimal("NaN"), 7) self.assertTrue(q.is_nan() and r.is_nan()) c.traps[InvalidOperation] = False c.clear_flags() q, r = divmod(Decimal("inf"), Decimal("inf")) self.assertTrue(q.is_nan() and r.is_nan()) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() q, r = divmod(Decimal("inf"), 101) self.assertTrue(q.is_infinite() and r.is_nan()) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() q, r = divmod(Decimal(0), 0) self.assertTrue(q.is_nan() and r.is_nan()) 
self.assertTrue(c.flags[InvalidOperation]) c.traps[DivisionByZero] = False c.clear_flags() q, r = divmod(Decimal(11), 0) self.assertTrue(q.is_infinite() and r.is_nan()) self.assertTrue(c.flags[InvalidOperation] and c.flags[DivisionByZero]) def test_power(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext Overflow = self.decimal.Overflow Rounded = self.decimal.Rounded with localcontext() as c: c.prec = 3 c.clear_flags() self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00')) self.assertTrue(c.flags[Rounded]) c.prec = 1 c.Emax = 1 c.Emin = -1 c.clear_flags() c.traps[Overflow] = False self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf')) self.assertTrue(c.flags[Overflow]) def test_quantize(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation with localcontext() as c: c.prec = 1 c.Emax = 1 c.Emin = -1 c.traps[InvalidOperation] = False x = Decimal(99).quantize(Decimal("1e1")) self.assertTrue(x.is_nan()) def test_radix(self): Decimal = self.decimal.Decimal getcontext = self.decimal.getcontext c = getcontext() self.assertEqual(Decimal("1").radix(), 10) self.assertEqual(c.radix(), 10) def test_rop(self): Decimal = self.decimal.Decimal for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__', '__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'): self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented) def test_round(self): # Python3 behavior: round() returns Decimal Decimal = self.decimal.Decimal getcontext = self.decimal.getcontext c = getcontext() c.prec = 28 self.assertEqual(str(Decimal("9.99").__round__()), "10") self.assertEqual(str(Decimal("9.99e-5").__round__()), "0") self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457") self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000") self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10") self.assertRaises(TypeError, Decimal("1.23").__round__, "5") 
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8) def test_create_decimal(self): c = self.decimal.Context() self.assertRaises(ValueError, c.create_decimal, ["%"]) def test_int(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext with localcontext() as c: c.prec = 9999 x = Decimal(1221**1271) / 10**3923 self.assertEqual(int(x), 1) self.assertEqual(x.to_integral(), 2) def test_copy(self): Context = self.decimal.Context c = Context() c.prec = 10000 x = -(1172 ** 1712) y = c.copy_abs(x) self.assertEqual(y, -x) y = c.copy_negate(x) self.assertEqual(y, -x) y = c.copy_sign(x, 1) self.assertEqual(y, -x) class CCoverage(Coverage): decimal = C class PyCoverage(Coverage): decimal = P class PyFunctionality(unittest.TestCase): """Extra functionality in decimal.py""" def test_py_alternate_formatting(self): # triples giving a format, a Decimal, and the expected result Decimal = P.Decimal localcontext = P.localcontext test_values = [ # Issue 7094: Alternate formatting (specified by #) ('.0e', '1.0', '1e+0'), ('#.0e', '1.0', '1.e+0'), ('.0f', '1.0', '1'), ('#.0f', '1.0', '1.'), ('g', '1.1', '1.1'), ('#g', '1.1', '1.1'), ('.0g', '1', '1'), ('#.0g', '1', '1.'), ('.0%', '1.0', '100%'), ('#.0%', '1.0', '100.%'), ] for fmt, d, result in test_values: self.assertEqual(format(Decimal(d), fmt), result) class PyWhitebox(unittest.TestCase): """White box testing for decimal.py""" def test_py_exact_power(self): # Rarely exercised lines in _power_exact. 
Decimal = P.Decimal localcontext = P.localcontext with localcontext() as c: c.prec = 8 x = Decimal(2**16) ** Decimal("-0.5") self.assertEqual(x, Decimal('0.00390625')) x = Decimal(2**16) ** Decimal("-0.6") self.assertEqual(x, Decimal('0.0012885819')) x = Decimal("256e7") ** Decimal("-0.5") x = Decimal(152587890625) ** Decimal('-0.0625') self.assertEqual(x, Decimal("0.2")) x = Decimal("152587890625e7") ** Decimal('-0.0625') x = Decimal(5**2659) ** Decimal('-0.0625') c.prec = 1 x = Decimal("152587890625") ** Decimal('-0.5') c.prec = 201 x = Decimal(2**578) ** Decimal("-0.5") def test_py_immutability_operations(self): # Do operations and check that it didn't change internal objects. Decimal = P.Decimal DefaultContext = P.DefaultContext setcontext = P.setcontext c = DefaultContext.copy() c.traps = dict((s, 0) for s in OrderedSignals[P]) setcontext(c) d1 = Decimal('-25e55') b1 = Decimal('-25e55') d2 = Decimal('33e+33') b2 = Decimal('33e+33') def checkSameDec(operation, useOther=False): if useOther: eval("d1." + operation + "(d2)") self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) self.assertEqual(d2._sign, b2._sign) self.assertEqual(d2._int, b2._int) self.assertEqual(d2._exp, b2._exp) else: eval("d1." 
+ operation + "()") self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) Decimal(d1) self.assertEqual(d1._sign, b1._sign) self.assertEqual(d1._int, b1._int) self.assertEqual(d1._exp, b1._exp) checkSameDec("__abs__") checkSameDec("__add__", True) checkSameDec("__divmod__", True) checkSameDec("__eq__", True) checkSameDec("__ne__", True) checkSameDec("__le__", True) checkSameDec("__lt__", True) checkSameDec("__ge__", True) checkSameDec("__gt__", True) checkSameDec("__float__") checkSameDec("__floordiv__", True) checkSameDec("__hash__") checkSameDec("__int__") checkSameDec("__trunc__") checkSameDec("__mod__", True) checkSameDec("__mul__", True) checkSameDec("__neg__") checkSameDec("__bool__") checkSameDec("__pos__") checkSameDec("__pow__", True) checkSameDec("__radd__", True) checkSameDec("__rdivmod__", True) checkSameDec("__repr__") checkSameDec("__rfloordiv__", True) checkSameDec("__rmod__", True) checkSameDec("__rmul__", True) checkSameDec("__rpow__", True) checkSameDec("__rsub__", True) checkSameDec("__str__") checkSameDec("__sub__", True) checkSameDec("__truediv__", True) checkSameDec("adjusted") checkSameDec("as_tuple") checkSameDec("compare", True) checkSameDec("max", True) checkSameDec("min", True) checkSameDec("normalize") checkSameDec("quantize", True) checkSameDec("remainder_near", True) checkSameDec("same_quantum", True) checkSameDec("sqrt") checkSameDec("to_eng_string") checkSameDec("to_integral") def test_py_decimal_id(self): Decimal = P.Decimal d = Decimal(45) e = Decimal(d) self.assertEqual(str(e), '45') self.assertNotEqual(id(d), id(e)) def test_py_rescale(self): # Coverage Decimal = P.Decimal localcontext = P.localcontext with localcontext() as c: x = Decimal("NaN")._rescale(3, ROUND_UP) self.assertTrue(x.is_nan()) def test_py__round(self): # Coverage Decimal = P.Decimal self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP) class CFunctionality(unittest.TestCase): """Extra 
functionality in _decimal""" @requires_extra_functionality def test_c_ieee_context(self): # issue 8786: Add support for IEEE 754 contexts to decimal module. IEEEContext = C.IEEEContext DECIMAL32 = C.DECIMAL32 DECIMAL64 = C.DECIMAL64 DECIMAL128 = C.DECIMAL128 def assert_rest(self, context): self.assertEqual(context.clamp, 1) assert_signals(self, context, 'traps', []) assert_signals(self, context, 'flags', []) c = IEEEContext(DECIMAL32) self.assertEqual(c.prec, 7) self.assertEqual(c.Emax, 96) self.assertEqual(c.Emin, -95) assert_rest(self, c) c = IEEEContext(DECIMAL64) self.assertEqual(c.prec, 16) self.assertEqual(c.Emax, 384) self.assertEqual(c.Emin, -383) assert_rest(self, c) c = IEEEContext(DECIMAL128) self.assertEqual(c.prec, 34) self.assertEqual(c.Emax, 6144) self.assertEqual(c.Emin, -6143) assert_rest(self, c) # Invalid values self.assertRaises(OverflowError, IEEEContext, 2**63) self.assertRaises(ValueError, IEEEContext, -1) self.assertRaises(ValueError, IEEEContext, 1024) @requires_extra_functionality def test_c_context(self): Context = C.Context c = Context(flags=C.DecClamped, traps=C.DecRounded) self.assertEqual(c._flags, C.DecClamped) self.assertEqual(c._traps, C.DecRounded) @requires_extra_functionality def test_constants(self): # Condition flags cond = ( C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero, C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError, C.DecInexact, C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError, C.DecFloatOperation, C.DecOverflow, C.DecRounded, C.DecSubnormal, C.DecUnderflow ) # IEEEContext self.assertEqual(C.DECIMAL32, 32) self.assertEqual(C.DECIMAL64, 64) self.assertEqual(C.DECIMAL128, 128) self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512) # Conditions for i, v in enumerate(cond): self.assertEqual(v, 1<<i) self.assertEqual(C.DecIEEEInvalidOperation, C.DecConversionSyntax| C.DecDivisionImpossible| C.DecDivisionUndefined| C.DecFpuError| C.DecInvalidContext| C.DecInvalidOperation| C.DecMallocError) 
self.assertEqual(C.DecErrors, C.DecIEEEInvalidOperation| C.DecDivisionByZero) self.assertEqual(C.DecTraps, C.DecErrors|C.DecOverflow|C.DecUnderflow) class CWhitebox(unittest.TestCase): """Whitebox testing for _decimal""" def test_bignum(self): # Not exactly whitebox, but too slow with pydecimal. Decimal = C.Decimal localcontext = C.localcontext b1 = 10**35 b2 = 10**36 with localcontext() as c: c.prec = 1000000 for i in range(5): a = random.randrange(b1, b2) b = random.randrange(1000, 1200) x = a ** b y = Decimal(a) ** Decimal(b) self.assertEqual(x, y) def test_invalid_construction(self): self.assertRaises(TypeError, C.Decimal, 9, "xyz") def test_c_input_restriction(self): # Too large for _decimal to be converted exactly Decimal = C.Decimal InvalidOperation = C.InvalidOperation Context = C.Context localcontext = C.localcontext with localcontext(Context()): self.assertRaises(InvalidOperation, Decimal, "1e9999999999999999999") def test_c_context_repr(self): # This test is _decimal-only because flags are not printed # in the same order. 
DefaultContext = C.DefaultContext FloatOperation = C.FloatOperation c = DefaultContext.copy() c.prec = 425000000 c.Emax = 425000000 c.Emin = -425000000 c.rounding = ROUND_HALF_DOWN c.capitals = 0 c.clamp = 1 for sig in OrderedSignals[C]: c.flags[sig] = True c.traps[sig] = True c.flags[FloatOperation] = True c.traps[FloatOperation] = True s = c.__repr__() t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \ "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \ "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \ "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \ "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \ "FloatOperation, Overflow, Rounded, Subnormal, Underflow])" self.assertEqual(s, t) def test_c_context_errors(self): Context = C.Context InvalidOperation = C.InvalidOperation Overflow = C.Overflow FloatOperation = C.FloatOperation localcontext = C.localcontext getcontext = C.getcontext setcontext = C.setcontext HAVE_CONFIG_64 = (C.MAX_PREC > 425000000) c = Context() # SignalDict: input validation self.assertRaises(KeyError, c.flags.__setitem__, 801, 0) self.assertRaises(KeyError, c.traps.__setitem__, 801, 0) self.assertRaises(ValueError, c.flags.__delitem__, Overflow) self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation) self.assertRaises(TypeError, setattr, c, 'flags', ['x']) self.assertRaises(TypeError, setattr, c,'traps', ['y']) self.assertRaises(KeyError, setattr, c, 'flags', {0:1}) self.assertRaises(KeyError, setattr, c, 'traps', {0:1}) # Test assignment from a signal dict with the correct length but # one invalid key. 
d = c.flags.copy() del d[FloatOperation] d["XYZ"] = 91283719 self.assertRaises(KeyError, setattr, c, 'flags', d) self.assertRaises(KeyError, setattr, c, 'traps', d) # Input corner cases int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1 gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9 # prec, Emax, Emin for attr in ['prec', 'Emax']: self.assertRaises(ValueError, setattr, c, attr, gt_max_emax) self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax) # prec, Emax, Emin in context constructor self.assertRaises(ValueError, Context, prec=gt_max_emax) self.assertRaises(ValueError, Context, Emax=gt_max_emax) self.assertRaises(ValueError, Context, Emin=-gt_max_emax) # Overflow in conversion self.assertRaises(OverflowError, Context, prec=int_max+1) self.assertRaises(OverflowError, Context, Emax=int_max+1) self.assertRaises(OverflowError, Context, Emin=-int_max-2) self.assertRaises(OverflowError, Context, clamp=int_max+1) self.assertRaises(OverflowError, Context, capitals=int_max+1) # OverflowError, general ValueError for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'): self.assertRaises(OverflowError, setattr, c, attr, int_max+1) self.assertRaises(OverflowError, setattr, c, attr, -int_max-2) if sys.platform != 'win32': self.assertRaises(ValueError, setattr, c, attr, int_max) self.assertRaises(ValueError, setattr, c, attr, -int_max-1) # OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax if C.MAX_PREC == 425000000: self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'), int_max+1) self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'), int_max+1) self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'), -int_max-2) # ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax if C.MAX_PREC == 425000000: self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0) self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 1070000001) self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1) 
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), 1070000001) self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), -1070000001) self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1) # capitals, clamp for attr in ['capitals', 'clamp']: self.assertRaises(ValueError, setattr, c, attr, -1) self.assertRaises(ValueError, setattr, c, attr, 2) self.assertRaises(TypeError, setattr, c, attr, [1,2,3]) if HAVE_CONFIG_64: self.assertRaises(ValueError, setattr, c, attr, 2**32) self.assertRaises(ValueError, setattr, c, attr, 2**32+1) # Invalid local context self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass', locals()) self.assertRaises(TypeError, exec, 'with localcontext(context=getcontext()): pass', locals()) # setcontext saved_context = getcontext() self.assertRaises(TypeError, setcontext, "xyz") setcontext(saved_context) def test_rounding_strings_interned(self): self.assertIs(C.ROUND_UP, P.ROUND_UP) self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN) self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING) self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR) self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP) self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN) self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN) self.assertIs(C.ROUND_05UP, P.ROUND_05UP) @requires_extra_functionality def test_c_context_errors_extra(self): Context = C.Context InvalidOperation = C.InvalidOperation Overflow = C.Overflow localcontext = C.localcontext getcontext = C.getcontext setcontext = C.setcontext HAVE_CONFIG_64 = (C.MAX_PREC > 425000000) c = Context() # Input corner cases int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1 # OverflowError, general ValueError self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1) self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2) if sys.platform != 'win32': self.assertRaises(ValueError, setattr, c, '_allcr', int_max) self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1) # OverflowError, general TypeError for attr 
in ('_flags', '_traps'): self.assertRaises(OverflowError, setattr, c, attr, int_max+1) self.assertRaises(OverflowError, setattr, c, attr, -int_max-2) if sys.platform != 'win32': self.assertRaises(TypeError, setattr, c, attr, int_max) self.assertRaises(TypeError, setattr, c, attr, -int_max-1) # _allcr self.assertRaises(ValueError, setattr, c, '_allcr', -1) self.assertRaises(ValueError, setattr, c, '_allcr', 2) self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3]) if HAVE_CONFIG_64: self.assertRaises(ValueError, setattr, c, '_allcr', 2**32) self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1) # _flags, _traps for attr in ['_flags', '_traps']: self.assertRaises(TypeError, setattr, c, attr, 999999) self.assertRaises(TypeError, setattr, c, attr, 'x') def test_c_valid_context(self): # These tests are for code coverage in _decimal. DefaultContext = C.DefaultContext Clamped = C.Clamped Underflow = C.Underflow Inexact = C.Inexact Rounded = C.Rounded Subnormal = C.Subnormal c = DefaultContext.copy() # Exercise all getters and setters c.prec = 34 c.rounding = ROUND_HALF_UP c.Emax = 3000 c.Emin = -3000 c.capitals = 1 c.clamp = 0 self.assertEqual(c.prec, 34) self.assertEqual(c.rounding, ROUND_HALF_UP) self.assertEqual(c.Emin, -3000) self.assertEqual(c.Emax, 3000) self.assertEqual(c.capitals, 1) self.assertEqual(c.clamp, 0) self.assertEqual(c.Etiny(), -3033) self.assertEqual(c.Etop(), 2967) # Exercise all unsafe setters if C.MAX_PREC == 425000000: c._unsafe_setprec(999999999) c._unsafe_setemax(999999999) c._unsafe_setemin(-999999999) self.assertEqual(c.prec, 999999999) self.assertEqual(c.Emax, 999999999) self.assertEqual(c.Emin, -999999999) @requires_extra_functionality def test_c_valid_context_extra(self): DefaultContext = C.DefaultContext c = DefaultContext.copy() self.assertEqual(c._allcr, 1) c._allcr = 0 self.assertEqual(c._allcr, 0) def test_c_round(self): # Restricted input. 
Decimal = C.Decimal InvalidOperation = C.InvalidOperation localcontext = C.localcontext MAX_EMAX = C.MAX_EMAX MIN_ETINY = C.MIN_ETINY int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1 with localcontext() as c: c.traps[InvalidOperation] = True self.assertRaises(InvalidOperation, Decimal("1.23").__round__, -int_max-1) self.assertRaises(InvalidOperation, Decimal("1.23").__round__, int_max) self.assertRaises(InvalidOperation, Decimal("1").__round__, int(MAX_EMAX+1)) self.assertRaises(C.InvalidOperation, Decimal("1").__round__, -int(MIN_ETINY-1)) self.assertRaises(OverflowError, Decimal("1.23").__round__, -int_max-2) self.assertRaises(OverflowError, Decimal("1.23").__round__, int_max+1) def test_c_format(self): # Restricted input Decimal = C.Decimal HAVE_CONFIG_64 = (C.MAX_PREC > 425000000) self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9) self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9) self.assertRaises(TypeError, Decimal(1).__format__, []) self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10") maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1 self.assertRaises(ValueError, Decimal("1.23456789").__format__, "=%d.1" % maxsize) def test_c_integral(self): Decimal = C.Decimal Inexact = C.Inexact localcontext = C.localcontext x = Decimal(10) self.assertEqual(x.to_integral(), 10) self.assertRaises(TypeError, x.to_integral, '10') self.assertRaises(TypeError, x.to_integral, 10, 'x') self.assertRaises(TypeError, x.to_integral, 10) self.assertEqual(x.to_integral_value(), 10) self.assertRaises(TypeError, x.to_integral_value, '10') self.assertRaises(TypeError, x.to_integral_value, 10, 'x') self.assertRaises(TypeError, x.to_integral_value, 10) self.assertEqual(x.to_integral_exact(), 10) self.assertRaises(TypeError, x.to_integral_exact, '10') self.assertRaises(TypeError, x.to_integral_exact, 10, 'x') self.assertRaises(TypeError, x.to_integral_exact, 10) with localcontext() as c: x = 
Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP) self.assertEqual(x, Decimal('100000000000000000000000000')) x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP) self.assertEqual(x, Decimal('100000000000000000000000000')) c.traps[Inexact] = True self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP) def test_c_funcs(self): # Invalid arguments Decimal = C.Decimal InvalidOperation = C.InvalidOperation DivisionByZero = C.DivisionByZero getcontext = C.getcontext localcontext = C.localcontext self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9') self.assertRaises(TypeError, pow, Decimal(1), 2, "3") self.assertRaises(TypeError, Decimal(9).number_class, "x", "y") self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y") self.assertRaises( TypeError, Decimal("1.23456789").quantize, Decimal('1e-100000'), [] ) self.assertRaises( TypeError, Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext() ) self.assertRaises( TypeError, Decimal("1.23456789").quantize, Decimal('1e-100000'), 10 ) self.assertRaises( TypeError, Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000 ) with localcontext() as c: c.clear_traps() # Invalid arguments self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y") self.assertRaises(TypeError, c.canonical, 200) self.assertRaises(TypeError, c.is_canonical, 200) self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y") self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y") self.assertEqual(str(c.canonical(Decimal(200))), '200') self.assertEqual(c.radix(), 10) c.traps[DivisionByZero] = True self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0) self.assertRaises(DivisionByZero, c.divmod, 9, 0) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() c.traps[InvalidOperation] = True self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0) self.assertRaises(InvalidOperation, c.divmod, 9, 0) self.assertTrue(c.flags[DivisionByZero]) 
c.traps[InvalidOperation] = True c.prec = 2 self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501) def test_va_args_exceptions(self): Decimal = C.Decimal Context = C.Context x = Decimal("10001111111") for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10', 'logb', 'logical_invert', 'next_minus', 'next_plus', 'normalize', 'number_class', 'sqrt', 'to_eng_string']: func = getattr(x, attr) self.assertRaises(TypeError, func, context="x") self.assertRaises(TypeError, func, "x", context=None) for attr in ['compare', 'compare_signal', 'logical_and', 'logical_or', 'max', 'max_mag', 'min', 'min_mag', 'remainder_near', 'rotate', 'scaleb', 'shift']: func = getattr(x, attr) self.assertRaises(TypeError, func, context="x") self.assertRaises(TypeError, func, "x", context=None) self.assertRaises(TypeError, x.to_integral, rounding=None, context=[]) self.assertRaises(TypeError, x.to_integral, rounding={}, context=[]) self.assertRaises(TypeError, x.to_integral, [], []) self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[]) self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[]) self.assertRaises(TypeError, x.to_integral_value, [], []) self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[]) self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[]) self.assertRaises(TypeError, x.to_integral_exact, [], []) self.assertRaises(TypeError, x.fma, 1, 2, context="x") self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None) self.assertRaises(TypeError, x.quantize, 1, [], context=None) self.assertRaises(TypeError, x.quantize, 1, [], rounding=None) self.assertRaises(TypeError, x.quantize, 1, [], []) c = Context() self.assertRaises(TypeError, c.power, 1, 2, mod="x") self.assertRaises(TypeError, c.power, 1, "x", mod=None) self.assertRaises(TypeError, c.power, "x", 2, mod=None) @requires_extra_functionality def test_c_context_templates(self): self.assertEqual( C.BasicContext._traps, 
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow| C.DecUnderflow|C.DecClamped ) self.assertEqual( C.DefaultContext._traps, C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow ) @requires_extra_functionality def test_c_signal_dict(self): # SignalDict coverage Context = C.Context DefaultContext = C.DefaultContext InvalidOperation = C.InvalidOperation DivisionByZero = C.DivisionByZero Overflow = C.Overflow Subnormal = C.Subnormal Underflow = C.Underflow Rounded = C.Rounded Inexact = C.Inexact Clamped = C.Clamped DecClamped = C.DecClamped DecInvalidOperation = C.DecInvalidOperation DecIEEEInvalidOperation = C.DecIEEEInvalidOperation def assertIsExclusivelySet(signal, signal_dict): for sig in signal_dict: if sig == signal: self.assertTrue(signal_dict[sig]) else: self.assertFalse(signal_dict[sig]) c = DefaultContext.copy() # Signal dict methods self.assertTrue(Overflow in c.traps) c.clear_traps() for k in c.traps.keys(): c.traps[k] = True for v in c.traps.values(): self.assertTrue(v) c.clear_traps() for k, v in c.traps.items(): self.assertFalse(v) self.assertFalse(c.flags.get(Overflow)) self.assertIs(c.flags.get("x"), None) self.assertEqual(c.flags.get("x", "y"), "y") self.assertRaises(TypeError, c.flags.get, "x", "y", "z") self.assertEqual(len(c.flags), len(c.traps)) s = sys.getsizeof(c.flags) s = sys.getsizeof(c.traps) s = c.flags.__repr__() # Set flags/traps. c.clear_flags() c._flags = DecClamped self.assertTrue(c.flags[Clamped]) c.clear_traps() c._traps = DecInvalidOperation self.assertTrue(c.traps[InvalidOperation]) # Set flags/traps from dictionary. 
c.clear_flags() d = c.flags.copy() d[DivisionByZero] = True c.flags = d assertIsExclusivelySet(DivisionByZero, c.flags) c.clear_traps() d = c.traps.copy() d[Underflow] = True c.traps = d assertIsExclusivelySet(Underflow, c.traps) # Random constructors IntSignals = { Clamped: C.DecClamped, Rounded: C.DecRounded, Inexact: C.DecInexact, Subnormal: C.DecSubnormal, Underflow: C.DecUnderflow, Overflow: C.DecOverflow, DivisionByZero: C.DecDivisionByZero, InvalidOperation: C.DecIEEEInvalidOperation } IntCond = [ C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError, C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError, C.DecConversionSyntax, ] lim = len(OrderedSignals[C]) for r in range(lim): for t in range(lim): for round in RoundingModes: flags = random.sample(OrderedSignals[C], r) traps = random.sample(OrderedSignals[C], t) prec = random.randrange(1, 10000) emin = random.randrange(-10000, 0) emax = random.randrange(0, 10000) clamp = random.randrange(0, 2) caps = random.randrange(0, 2) cr = random.randrange(0, 2) c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax, capitals=caps, clamp=clamp, flags=list(flags), traps=list(traps)) self.assertEqual(c.prec, prec) self.assertEqual(c.rounding, round) self.assertEqual(c.Emin, emin) self.assertEqual(c.Emax, emax) self.assertEqual(c.capitals, caps) self.assertEqual(c.clamp, clamp) f = 0 for x in flags: f |= IntSignals[x] self.assertEqual(c._flags, f) f = 0 for x in traps: f |= IntSignals[x] self.assertEqual(c._traps, f) for cond in IntCond: c._flags = cond self.assertTrue(c._flags&DecIEEEInvalidOperation) assertIsExclusivelySet(InvalidOperation, c.flags) for cond in IntCond: c._traps = cond self.assertTrue(c._traps&DecIEEEInvalidOperation) assertIsExclusivelySet(InvalidOperation, c.traps) def test_invalid_override(self): Decimal = C.Decimal try: from locale import CHAR_MAX except ImportError: self.skipTest('locale.CHAR_MAX not available') def make_grouping(lst): return ''.join([chr(x) for x in lst]) def 
get_fmt(x, override=None, fmt='n'): return Decimal(x).__format__(fmt, override) invalid_grouping = { 'decimal_point' : ',', 'grouping' : make_grouping([255, 255, 0]), 'thousands_sep' : ',' } invalid_dot = { 'decimal_point' : 'xxxxx', 'grouping' : make_grouping([3, 3, 0]), 'thousands_sep' : ',' } invalid_sep = { 'decimal_point' : '.', 'grouping' : make_grouping([3, 3, 0]), 'thousands_sep' : 'yyyyy' } if CHAR_MAX == 127: # negative grouping in override self.assertRaises(ValueError, get_fmt, 12345, invalid_grouping, 'g') self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g') self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g') def test_exact_conversion(self): Decimal = C.Decimal localcontext = C.localcontext InvalidOperation = C.InvalidOperation with localcontext() as c: c.traps[InvalidOperation] = True # Clamped x = "0e%d" % sys.maxsize self.assertRaises(InvalidOperation, Decimal, x) x = "0e%d" % (-sys.maxsize-1) self.assertRaises(InvalidOperation, Decimal, x) # Overflow x = "1e%d" % sys.maxsize self.assertRaises(InvalidOperation, Decimal, x) # Underflow x = "1e%d" % (-sys.maxsize-1) self.assertRaises(InvalidOperation, Decimal, x) def test_from_tuple(self): Decimal = C.Decimal localcontext = C.localcontext InvalidOperation = C.InvalidOperation Overflow = C.Overflow Underflow = C.Underflow with localcontext() as c: c.traps[InvalidOperation] = True c.traps[Overflow] = True c.traps[Underflow] = True # SSIZE_MAX x = (1, (), sys.maxsize) self.assertEqual(str(c.create_decimal(x)), '-0E+999999') self.assertRaises(InvalidOperation, Decimal, x) x = (1, (0, 1, 2), sys.maxsize) self.assertRaises(Overflow, c.create_decimal, x) self.assertRaises(InvalidOperation, Decimal, x) # SSIZE_MIN x = (1, (), -sys.maxsize-1) self.assertEqual(str(c.create_decimal(x)), '-0E-1000026') self.assertRaises(InvalidOperation, Decimal, x) x = (1, (0, 1, 2), -sys.maxsize-1) self.assertRaises(Underflow, c.create_decimal, x) self.assertRaises(InvalidOperation, Decimal, x) # 
OverflowError x = (1, (), sys.maxsize+1) self.assertRaises(OverflowError, c.create_decimal, x) self.assertRaises(OverflowError, Decimal, x) x = (1, (), -sys.maxsize-2) self.assertRaises(OverflowError, c.create_decimal, x) self.assertRaises(OverflowError, Decimal, x) # Specials x = (1, (), "N") self.assertEqual(str(Decimal(x)), '-sNaN') x = (1, (0,), "N") self.assertEqual(str(Decimal(x)), '-sNaN') x = (1, (0, 1), "N") self.assertEqual(str(Decimal(x)), '-sNaN1') def test_sizeof(self): Decimal = C.Decimal HAVE_CONFIG_64 = (C.MAX_PREC > 425000000) self.assertGreater(Decimal(0).__sizeof__(), 0) if HAVE_CONFIG_64: x = Decimal(10**(19*24)).__sizeof__() y = Decimal(10**(19*25)).__sizeof__() self.assertEqual(y, x+8) else: x = Decimal(10**(9*24)).__sizeof__() y = Decimal(10**(9*25)).__sizeof__() self.assertEqual(y, x+4) @requires_docstrings @unittest.skipUnless(C, "test requires C version") class SignatureTest(unittest.TestCase): """Function signatures""" def test_inspect_module(self): for attr in dir(P): if attr.startswith('_'): continue p_func = getattr(P, attr) c_func = getattr(C, attr) if (attr == 'Decimal' or attr == 'Context' or inspect.isfunction(p_func)): p_sig = inspect.signature(p_func) c_sig = inspect.signature(c_func) # parameter names: c_names = list(c_sig.parameters.keys()) p_names = [x for x in p_sig.parameters.keys() if not x.startswith('_')] self.assertEqual(c_names, p_names, msg="parameter name mismatch in %s" % p_func) c_kind = [x.kind for x in c_sig.parameters.values()] p_kind = [x[1].kind for x in p_sig.parameters.items() if not x[0].startswith('_')] # parameters: if attr != 'setcontext': self.assertEqual(c_kind, p_kind, msg="parameter kind mismatch in %s" % p_func) def test_inspect_types(self): POS = inspect._ParameterKind.POSITIONAL_ONLY POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD # Type heuristic (type annotations would help!): pdict = {C: {'other': C.Decimal(1), 'third': C.Decimal(1), 'x': C.Decimal(1), 'y': C.Decimal(1), 'z': 
C.Decimal(1), 'a': C.Decimal(1), 'b': C.Decimal(1), 'c': C.Decimal(1), 'exp': C.Decimal(1), 'modulo': C.Decimal(1), 'num': "1", 'f': 1.0, 'rounding': C.ROUND_HALF_UP, 'context': C.getcontext()}, P: {'other': P.Decimal(1), 'third': P.Decimal(1), 'a': P.Decimal(1), 'b': P.Decimal(1), 'c': P.Decimal(1), 'exp': P.Decimal(1), 'modulo': P.Decimal(1), 'num': "1", 'f': 1.0, 'rounding': P.ROUND_HALF_UP, 'context': P.getcontext()}} def mkargs(module, sig): args = [] kwargs = {} for name, param in sig.parameters.items(): if name == 'self': continue if param.kind == POS: args.append(pdict[module][name]) elif param.kind == POS_KWD: kwargs[name] = pdict[module][name] else: raise TestFailed("unexpected parameter kind") return args, kwargs def tr(s): """The C Context docstrings use 'x' in order to prevent confusion with the article 'a' in the descriptions.""" if s == 'x': return 'a' if s == 'y': return 'b' if s == 'z': return 'c' return s def doit(ty): p_type = getattr(P, ty) c_type = getattr(C, ty) for attr in dir(p_type): if attr.startswith('_'): continue p_func = getattr(p_type, attr) c_func = getattr(c_type, attr) if inspect.isfunction(p_func): p_sig = inspect.signature(p_func) c_sig = inspect.signature(c_func) # parameter names: p_names = list(p_sig.parameters.keys()) c_names = [tr(x) for x in c_sig.parameters.keys()] self.assertEqual(c_names, p_names, msg="parameter name mismatch in %s" % p_func) p_kind = [x.kind for x in p_sig.parameters.values()] c_kind = [x.kind for x in c_sig.parameters.values()] # 'self' parameter: self.assertIs(p_kind[0], POS_KWD) self.assertIs(c_kind[0], POS) # remaining parameters: if ty == 'Decimal': self.assertEqual(c_kind[1:], p_kind[1:], msg="parameter kind mismatch in %s" % p_func) else: # Context methods are positional only in the C version. 
self.assertEqual(len(c_kind), len(p_kind), msg="parameter kind mismatch in %s" % p_func) # Run the function: args, kwds = mkargs(C, c_sig) try: getattr(c_type(9), attr)(*args, **kwds) except Exception as err: raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds)) args, kwds = mkargs(P, p_sig) try: getattr(p_type(9), attr)(*args, **kwds) except Exception as err: raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds)) doit('Decimal') doit('Context') all_tests = [ CExplicitConstructionTest, PyExplicitConstructionTest, CImplicitConstructionTest, PyImplicitConstructionTest, CFormatTest, PyFormatTest, CArithmeticOperatorsTest, PyArithmeticOperatorsTest, CThreadingTest, PyThreadingTest, CUsabilityTest, PyUsabilityTest, CPythonAPItests, PyPythonAPItests, CContextAPItests, PyContextAPItests, CContextWithStatement, PyContextWithStatement, CContextFlags, PyContextFlags, CSpecialContexts, PySpecialContexts, CContextInputValidation, PyContextInputValidation, CContextSubclassing, PyContextSubclassing, CCoverage, PyCoverage, CFunctionality, PyFunctionality, CWhitebox, PyWhitebox, CIBMTestCases, PyIBMTestCases, ] # Delete C tests if _decimal.so is not present. if not C: all_tests = all_tests[1::2] else: all_tests.insert(0, CheckAttributes) all_tests.insert(1, SignatureTest) def test_main(arith=None, verbose=None, todo_tests=None, debug=None): """ Execute the tests. Runs all arithmetic tests if arith is True or if the "decimal" resource is enabled in regrtest.py """ init(C) init(P) global TEST_ALL, DEBUG TEST_ALL = arith if arith is not None else is_resource_enabled('decimal') DEBUG = debug if todo_tests is None: test_classes = all_tests else: test_classes = [CIBMTestCases, PyIBMTestCases] # Dynamically build custom test definition for each file in the test # directory and add the definitions to the DecimalTest class. This # procedure insures that new files do not get skipped. 
for filename in os.listdir(directory): if '.decTest' not in filename or filename.startswith("."): continue head, tail = filename.split('.') if todo_tests is not None and head not in todo_tests: continue tester = lambda self, f=filename: self.eval_file(directory + f) setattr(CIBMTestCases, 'test_' + head, tester) setattr(PyIBMTestCases, 'test_' + head, tester) del filename, head, tail, tester try: run_unittest(*test_classes) if todo_tests is None: from doctest import IGNORE_EXCEPTION_DETAIL savedecimal = sys.modules['decimal'] if C: sys.modules['decimal'] = C run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL) sys.modules['decimal'] = P run_doctest(P, verbose) sys.modules['decimal'] = savedecimal finally: if C: C.setcontext(ORIGINAL_CONTEXT[C]) P.setcontext(ORIGINAL_CONTEXT[P]) if not C: warnings.warn('C tests skipped: no module named _decimal.', UserWarning) if not orig_sys_decimal is sys.modules['decimal']: raise TestFailed("Internal error: unbalanced number of changes to " "sys.modules['decimal'].") if __name__ == '__main__': import optparse p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]") p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test') p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests') (opt, args) = p.parse_args() if opt.skip: test_main(arith=False, verbose=True) elif args: test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug) else: test_main(arith=True, verbose=True)
chatbox_nodb.py
import sys
import time
import telepot
from telepot.loop import MessageLoop
from telepot.delegate import (
    per_chat_id_in, per_application, call, create_open, pave_event_space)

"""
$ python3.5 chatbox_nodb.py <token> <owner_id>

Chatbox - a mailbox for chats

1. People send messages to your bot.
2. Your bot remembers the messages.
3. You read the messages later.

It accepts the following commands from you, the owner, only:

- `/unread` - tells you who has sent you messages and how many
- `/next` - read next sender's messages

This example can be a starting point for **customer support** type of bots.
For example, customers send questions to a bot account; staff answers
questions behind the scene, makes it look like the bot is answering
questions.

It further illustrates the use of `DelegateBot` and `ChatHandler`, and how to
spawn delegates differently according to the role of users.

This example only handles text messages and stores messages in memory.
If the bot is killed, all messages are lost. It is an *example* after all.
"""


# Simulate a database to store unread messages
class UnreadStore(object):
    """In-memory store of unread messages, keyed by chat id."""

    def __init__(self):
        self._db = {}

    def put(self, msg):
        """Remember one incoming message under its chat id."""
        chat_id = msg['chat']['id']

        if chat_id not in self._db:
            self._db[chat_id] = []

        self._db[chat_id].append(msg)

    # Pull all unread messages of a `chat_id`
    def pull(self, chat_id):
        """Remove and return all messages of `chat_id`, oldest first."""
        messages = self._db[chat_id]
        del self._db[chat_id]

        # sort by date
        messages.sort(key=lambda m: m['date'])
        return messages

    # Tells how many unread messages per chat_id
    def unread_per_chat(self):
        """Return a list of (chat_id, unread_count) pairs."""
        return [(k, len(v)) for k, v in self._db.items()]


# Accept commands from owner. Give him unread messages.
class OwnerHandler(telepot.helper.ChatHandler):
    """Per-chat handler for the owner: serves `/unread` and `/next`."""

    def __init__(self, seed_tuple, store, **kwargs):
        super(OwnerHandler, self).__init__(seed_tuple, **kwargs)
        self._store = store

    def _read_messages(self, messages):
        for msg in messages:
            # assume all messages are text
            self.sender.sendMessage(msg['text'])

    def on_chat_message(self, msg):
        content_type, chat_type, chat_id = telepot.glance(msg)

        if content_type != 'text':
            self.sender.sendMessage("I don't understand")
            return

        command = msg['text'].strip().lower()

        # Tells who has sent you how many messages
        if command == '/unread':
            results = self._store.unread_per_chat()

            lines = []
            for r in results:
                n = 'ID: %d\n%d unread' % r
                lines.append(n)

            if not len(lines):
                self.sender.sendMessage('No unread messages')
            else:
                self.sender.sendMessage('\n'.join(lines))

        # read next sender's messages
        elif command == '/next':
            results = self._store.unread_per_chat()

            if not len(results):
                self.sender.sendMessage('No unread messages')
                return

            chat_id = results[0][0]
            unread_messages = self._store.pull(chat_id)

            self.sender.sendMessage('From ID: %d' % chat_id)
            self._read_messages(unread_messages)

        else:
            self.sender.sendMessage("I don't understand")


class MessageSaver(telepot.helper.Monitor):
    """Application-wide monitor that persists every incoming text message."""

    def __init__(self, seed_tuple, store, exclude):
        # The `capture` criteria means to capture all messages.
        super(MessageSaver, self).__init__(
            seed_tuple, capture=[[lambda msg: not telepot.is_event(msg)]])
        self._store = store
        self._exclude = exclude

    # Store every message, except those whose sender is in the exclude list,
    # or non-text messages.
    def on_chat_message(self, msg):
        content_type, chat_type, chat_id = telepot.glance(msg)

        if chat_id in self._exclude:
            print('Chat id %d is excluded.' % chat_id)
            return

        if content_type != 'text':
            print('Content type %s is ignored.' % content_type)
            return

        print('Storing message: %s' % msg)
        self._store.put(msg)


import threading

class CustomThread(threading.Thread):
    """Thread subclass that announces itself on start (delegation demo)."""

    def start(self):
        print('CustomThread starting ...')
        super(CustomThread, self).start()


# Note how this function wraps around the `call()` function below to implement
# a custom thread for delegation.
def custom_thread(func):
    def f(seed_tuple):
        target = func(seed_tuple)

        if type(target) is tuple:
            run, args, kwargs = target
            t = CustomThread(target=run, args=args, kwargs=kwargs)
        else:
            t = CustomThread(target=target)

        return t
    return f


class ChatBox(telepot.DelegatorBot):
    """Mailbox bot: owner commands, message capture, and newcomer welcome."""

    def __init__(self, token, owner_id):
        self._owner_id = owner_id
        self._seen = set()
        self._store = UnreadStore()

        super(ChatBox, self).__init__(token, [
            # Here is a delegate to specially handle owner commands.
            pave_event_space()(
                per_chat_id_in([owner_id]), create_open,
                OwnerHandler, self._store, timeout=20),

            # Only one MessageSaver is ever spawned for entire application.
            (per_application(),
             create_open(MessageSaver, self._store, exclude=[owner_id])),

            # For senders never seen before, send him a welcome message.
            (self._is_newcomer, custom_thread(call(self._send_welcome))),
        ])

    # seed-calculating function: use returned value to indicate whether
    # to spawn a delegate
    def _is_newcomer(self, msg):
        if telepot.is_event(msg):
            return None

        chat_id = msg['chat']['id']
        if chat_id == self._owner_id:  # Sender is owner
            return None  # No delegate spawned

        if chat_id in self._seen:  # Sender has been seen before
            return None  # No delegate spawned

        self._seen.add(chat_id)
        # non-hashable ==> delegates are independent, no seed association
        # is made.
        return []

    def _send_welcome(self, seed_tuple):
        chat_id = seed_tuple[1]['chat']['id']
        print('Sending welcome ...')
        self.sendMessage(chat_id, 'Hello!')


TOKEN = sys.argv[1]
OWNER_ID = int(sys.argv[2])

bot = ChatBox(TOKEN, OWNER_ID)
MessageLoop(bot).run_as_thread()
print('Listening ...')

while 1:
    time.sleep(10)
log.py
#!/usr/bin/env python """ Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/) See the file 'LICENSE' for copying permission """ from __future__ import print_function import datetime import json import os import re import signal import socket import sys import threading import time import traceback from core.common import check_whitelisted from core.common import check_sudo from core.compat import xrange from core.enums import TRAIL from core.settings import CEF_FORMAT from core.settings import config from core.settings import CONDENSE_ON_INFO_KEYWORDS from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS from core.settings import HOSTNAME from core.settings import NAME from core.settings import TIME_FORMAT from core.settings import UNICODE_ENCODING from core.settings import VERSION from core.ignore import ignore_event from thirdparty.odict import OrderedDict from thirdparty.six.moves import socketserver as _socketserver _condensed_events = {} _condensing_thread = None _condensing_lock = threading.Lock() _single_messages = set() _thread_data = threading.local() def create_log_directory(): if not os.path.isdir(config.LOG_DIR): if not config.DISABLE_CHECK_SUDO and check_sudo() is False: exit("[!] 
please rerun with sudo/Administrator privileges") os.makedirs(config.LOG_DIR, 0o755) print("[i] using '%s' for log storage" % config.LOG_DIR) def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True): retval = None localtime = time.localtime(sec) _ = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday)) if not reuse: if not os.path.exists(_): open(_, "w+").close() os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS) retval = os.open(_, flags) else: if _ != getattr(_thread_data, "event_log_path", None): if getattr(_thread_data, "event_log_handle", None): try: os.close(_thread_data.event_log_handle) except OSError: pass if not os.path.exists(_): open(_, "w+").close() os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS) _thread_data.event_log_path = _ _thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags) retval = _thread_data.event_log_handle return retval def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY): if not hasattr(_thread_data, "error_log_handle"): _ = os.path.join(config.get("LOG_DIR") or os.curdir, "error.log") if not os.path.exists(_): open(_, "w+").close() os.chmod(_, DEFAULT_ERROR_LOG_PERMISSIONS) _thread_data.error_log_path = _ _thread_data.error_log_handle = os.open(_thread_data.error_log_path, flags) return _thread_data.error_log_handle def safe_value(value): retval = str(value or '-') if any(_ in retval for _ in (' ', '"')): retval = "\"%s\"" % retval.replace('"', '""') retval = re.sub(r"[\x0a\x0d]", " ", retval) return retval def flush_condensed_events(single=False): while True: if not single: time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD) with _condensing_lock: for key in _condensed_events: condensed = False events = _condensed_events[key] first_event = events[0] condensed_event = [_ for _ in first_event] for i in xrange(1, len(events)): current_event = events[i] for j in xrange(3, 7): # src_port, dst_ip, dst_port, proto if current_event[j] != 
condensed_event[j]: condensed = True if not isinstance(condensed_event[j], set): condensed_event[j] = set((condensed_event[j],)) condensed_event[j].add(current_event[j]) if condensed: for i in xrange(len(condensed_event)): if isinstance(condensed_event[i], set): condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i])) log_event(condensed_event, skip_condensing=True) _condensed_events.clear() if single: break def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False): global _condensing_thread if _condensing_thread is None: _condensing_thread = threading.Thread(target=flush_condensed_events) _condensing_thread.daemon = True _condensing_thread.start() try: sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple if ignore_event(event_tuple): return if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS): # DNS requests/responses can't be whitelisted based on src_ip/dst_ip if not skip_write: localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec) if not skip_condensing: if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS): with _condensing_lock: key = (src_ip, trail) if key not in _condensed_events: _condensed_events[key] = [] _condensed_events[key].append(event_tuple) return current_bucket = sec // config.PROCESS_COUNT if getattr(_thread_data, "log_bucket", None) != current_bucket: # log throttling _thread_data.log_bucket = current_bucket _thread_data.log_trails = set() else: if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))): return else: _thread_data.log_trails.add((src_ip, trail)) _thread_data.log_trails.add((dst_ip, trail)) event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:])) if not config.DISABLE_LOCAL_LOG_STORAGE: handle = get_event_log_handle(sec) os.write(handle, event.encode(UNICODE_ENCODING)) if 
config.LOG_SERVER: if config.LOG_SERVER.count(':') > 1: remote_host, remote_port = config.LOG_SERVER.replace('[', '').replace(']', '').rsplit(':', 1) # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0) _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV _address = socket.getaddrinfo(remote_host, int(remote_port) if str(remote_port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4] else: remote_host, remote_port = config.LOG_SERVER.split(':') _address = (remote_host, int(remote_port)) s = socket.socket(socket.AF_INET if len(_address) == 2 else socket.AF_INET6, socket.SOCK_DGRAM) s.sendto(("%s %s" % (sec, event)).encode(UNICODE_ENCODING), _address) if config.SYSLOG_SERVER or config.LOGSTASH_SERVER: severity = "medium" if config.REMOTE_SEVERITY_REGEX: match = re.search(config.REMOTE_SEVERITY_REGEX, info) if match: for _ in ("low", "medium", "high"): if match.group(_): severity = _ break if config.SYSLOG_SERVER: extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference) _ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(config.TRAILS_FILE))), name=info, severity={"low": 0, "medium": 1, "high": 2}.get(severity), extension=extension) remote_host, remote_port = config.SYSLOG_SERVER.split(':') s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.sendto(_.encode(UNICODE_ENCODING), (remote_host, int(remote_port))) if config.LOGSTASH_SERVER: _ = OrderedDict((("timestamp", sec), ("sensor", HOSTNAME), ("severity", severity), ("src_ip", src_ip), ("src_port", src_port), ("dst_ip", dst_ip), ("dst_port", dst_port), ("proto", proto), ("type", trail_type), ("trail", trail), ("info", info), ("reference", reference))) s = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) remote_host, remote_port = config.LOGSTASH_SERVER.split(':') s.sendto(json.dumps(_).encode(UNICODE_ENCODING), (remote_host, int(remote_port))) if (config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER))) or config.console: sys.stderr.write(event) sys.stderr.flush() if config.plugin_functions: for _ in config.plugin_functions: _(event_tuple, packet) except (OSError, IOError): if config.SHOW_DEBUG: traceback.print_exc() def log_error(msg, single=False): if single: if msg in _single_messages: return else: _single_messages.add(msg) try: handle = get_error_log_handle() os.write(handle, ("%s %s\n" % (time.strftime(TIME_FORMAT, time.localtime()), msg)).encode(UNICODE_ENCODING)) except (OSError, IOError): if config.SHOW_DEBUG: traceback.print_exc() def start_logd(address=None, port=None, join=False): class ThreadingUDPServer(_socketserver.ThreadingMixIn, _socketserver.UDPServer): pass class UDPHandler(_socketserver.BaseRequestHandler): def handle(self): try: data, _ = self.request if data[0:1].isdigit(): # Note: regular format with timestamp in front sec, event = data.split(b' ', 1) else: # Note: naive format without timestamp in front event_date = datetime.datetime.strptime(data[1:data.find(b'.')].decode(UNICODE_ENCODING), TIME_FORMAT) sec = int(time.mktime(event_date.timetuple())) event = data if not event.endswith(b'\n'): event = b"%s\n" % event handle = get_event_log_handle(int(sec), reuse=False) os.write(handle, event) os.close(handle) except: if config.SHOW_DEBUG: traceback.print_exc() # IPv6 support if ':' in (address or ""): address = address.strip("[]") _socketserver.UDPServer.address_family = socket.AF_INET6 # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0) _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV _address = socket.getaddrinfo(address, int(port) if str(port or 
"").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4] else: _address = (address or '', int(port) if str(port or "").isdigit() else 0) server = ThreadingUDPServer(_address, UDPHandler) print("[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1])) if join: server.serve_forever() else: thread = threading.Thread(target=server.serve_forever) thread.daemon = True thread.start() def set_sigterm_handler(): def handler(signum, frame): log_error("SIGTERM") raise SystemExit if hasattr(signal, "SIGTERM"): signal.signal(signal.SIGTERM, handler) if __name__ != "__main__": set_sigterm_handler()
ldapcheck.py
#!/usr/bin/env python
"""
ldap check service

Author: George Mihalcea
Created: 22.03.2017
"""
import yaml
import ldap
import socket
import sys
import os
import signal
import multiprocessing
from threading import Thread
from time import sleep

CONFIG_FILE = '../config/config.yml'


def sig_term(mysignal, frame):
    """Handling termination signals
    """
    print("Signal %s frame %s - exiting." % (mysignal, frame))
    raise ExitDaemon


class ExitDaemon(Exception):
    """Exception used to exit daemon
    """
    pass


def get_config(config):
    """Reads config file. Returns config options as dictionary.
    """
    cfg = {}
    try:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; the config file is local here, but
        # yaml.safe_load would be the safer choice - confirm and switch.
        cfg = yaml.load(open(config, 'r'))
        # defaults for optional settings
        if 'HOST' not in cfg:
            cfg['HOST'] = '0.0.0.0'
        if 'DATA_SIZE' not in cfg:
            cfg['DATA_SIZE'] = 512
        if 'SLEEP' not in cfg:
            cfg['SLEEP'] = 1
        if 'DEBUG' not in cfg:
            cfg['DEBUG'] = False
        if 'INFO' not in cfg:
            cfg['INFO'] = False
    except yaml.scanner.ScannerError as err:
        print('Invalid config file. Error: %s. Exiting...' % err)
        sys.exit(1)
    return cfg


class ConnThread(Thread):
    """Thread that handles one connection
    """

    def __init__(self, conn, config, secure):
        Thread.__init__(self)
        self.conn = conn
        self.config = config
        self.secure = secure
        if self.config['INFO']:
            print('New thread started')

    def send_response(self, code, resp):
        """Send http response
        """
        try:
            self.conn.send('HTTP/1.0 %s\n' % code)
            self.conn.send('Content-Type: text/plain\n\n')
            self.conn.send('%s\n' % resp)
        except socket.error as e:  # fixed: Python-2-only "except X, e" syntax
            if self.config['DEBUG']:
                print(e)
        self.conn.close()

    def run(self):
        # fixed: previously read the module-level `config` global (which only
        # worked because __main__ happened to define it) inside a pointless
        # single-iteration while loop
        data = self.conn.recv(self.config['DATA_SIZE'])

        # GET or HEAD
        if data.startswith('GET /') or data.startswith('HEAD /'):
            try:
                if self.secure:
                    # ldaps: bind directly over the TLS URL
                    ld = ldap.initialize(self.config['URL_S'])
                    ld.simple_bind_s(self.config['USER'], self.config['PASS'])
                else:
                    # plain ldap: upgrade the connection via STARTTLS first
                    ld = ldap.initialize(self.config['URL'])
                    ld.start_tls_s()
                    ld.simple_bind_s(self.config['USER'], self.config['PASS'])
                code = '200 OK'
                resp = 'OK'
            except ldap.LDAPError as e:  # fixed: Python-2-only syntax
                code = '503 Service Unavailable'
                resp = e
                if self.config['DEBUG']:
                    print(e)
            self.send_response(code, resp)
        else:
            code = '400 Invalid Request'
            resp = 'Invalid Request'
            self.send_response(code, resp)


def socket_worker_process(config, secure=True):
    """Open a TCP socket and listen for incoming connections
    """
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:
        try:
            if secure:
                serversocket.bind((config['HOST'], config['PORT_S']))
            else:
                serversocket.bind((config['HOST'], config['PORT']))
        except Exception as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            if config['DEBUG']:
                print('Exception: %s %s %s %s'
                      % (exc, exc_type, fname, exc_tb.tb_lineno))
        else:
            try:
                serversocket.listen(5)
            except Exception as exc:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                if config['DEBUG']:
                    print('Exception: %s %s %s %s'
                          % (exc, exc_type, fname, exc_tb.tb_lineno))
            # serve connections forever; each one gets its own ConnThread
            while True:
                try:
                    conn, addr = serversocket.accept()
                    if config['INFO']:
                        # fixed: print() was called with logging-style extra
                        # args, so the %s placeholders were never filled
                        print('Connection accepted: %s %s' % (conn, addr))
                    thread = ConnThread(conn, config, secure)
                    thread.start()
                except KeyboardInterrupt:
                    pass
                except Exception as exc:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    if config['DEBUG']:
                        print('Exception: %s %s %s %s'
                              % (exc, exc_type, fname, exc_tb.tb_lineno))
        # back off before retrying a failed bind
        try:
            sleep(1)
        except KeyboardInterrupt:
            pass
    return


if __name__ == '__main__':
    """main program
    """
    config = get_config(CONFIG_FILE)

    # a dictionary for saving processes info for debugging purposes
    process_list = {}

    # start ldap worker process
    ldap_worker = multiprocessing.Process(target=socket_worker_process,
                                          args=(config, False))
    ldap_worker.daemon = True
    ldap_worker.start()
    process_list['ldap_worker'] = ldap_worker
    print('Started process %s with pid %s' % ('ldap_worker', ldap_worker.pid))

    # start ldaps worker process (secure defaults to True)
    ldaps_worker = multiprocessing.Process(target=socket_worker_process,
                                           args=(config,))
    ldaps_worker.daemon = True
    ldaps_worker.start()
    process_list['ldaps_worker'] = ldaps_worker
    print('Started process %s with pid %s' % ('ldaps_worker', ldaps_worker.pid))

    # define signals
    signal.signal(signal.SIGTERM, sig_term)
    signal.signal(signal.SIGINT, sig_term)
    signal.signal(signal.SIGHUP, sig_term)

    while True:
        try:
            signal.pause()
        except ExitDaemon:
            break
channel_mapper.py
#! /usr/bin/python # # Qt example for VLC Python bindings # Copyright (C) 2009-2010 the VideoLAN team # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. # import sys import os.path from PyQt5.QtCore import Qt, QTimer, QRect, QSize from PyQt5.QtGui import QPalette, QColor, QPixmap, QIcon, QFont from PyQt5.QtWidgets import QMainWindow, QWidget, QFrame, QSlider, QHBoxLayout, QPushButton, \ QVBoxLayout, QAction, QFileDialog, QApplication, \ QLabel, QLineEdit, QCheckBox, QRadioButton, QListWidget, QListWidgetItem import vlc import time import json import pickle import tqdm import re from urllib import request import threading import functools import copy # Function for Channel Mapping def read_m3u(filepath): stream_db = [] with open(filepath, encoding='UTF8') as f: stream_info = None extinf_pattern = re.compile('#EXTINF:[\-0-9]+(.*),(.*)$') attr_pattern = re.compile('tvg-id=\"([0-9]+)\" tvg-logo=\"([0-9A-Za-z.:/_?=]+)\" .*tvh-chnum=\"([0-9]+)\"') for line in f.readlines(): if line.startswith('#EXTM3U'): pass elif line.startswith('#EXTINF'): m = extinf_pattern.match(line) attrs, ch_name = m.group(1), m.group(2) m = attr_pattern.match(attrs.strip()) tvg_id = m.group(1) if m else "" tvg_logo = m.group(2) if m else "" tvh_chnum = m.group(3) if m else "" stream_info = { 'tvg-id': tvg_id.strip(), 'tvg-logo': tvg_logo.strip(), 
'tvh-chnum': tvh_chnum.strip(), 'ch-name': ch_name.strip(), } print(stream_info) elif line.startswith('udp://'): stream_info['multicast'] = line.strip('\r\n') stream_db.append(stream_info) return stream_db def remove_unmapped_channel(playlist, stream_urls): out = [] for line in playlist: if line['multicast'] in stream_urls: out.append(line) return out def fill_unmapped_channel(playlist, channels): mapped = dict([(_['tvg-id'], _) for _ in playlist]) for service_id in channels: channel = channels[service_id] stream_info = { 'tvg-id': channel['ServiceId'], 'tvg-logo': channel['Icon_url'], 'tvh-chnum': channel['SKBCh'], 'ch-name': channel['SKB Name'], } if service_id in mapped: mapped[service_id].update(stream_info) else: stream_info['multicast'] = None playlist.append(stream_info) return playlist # VLC Error Check vlc_error_count = 0 vlc_error_check = False vlc_handle_mode = 0 @vlc.CallbackDecorators.LogCb def vlc_log_callback(data, level, ctx, fmt, args): global vlc_error_count, vlc_error_check, vlc_handle_mode if level >= 3: vlc_error_count += 1 if vlc_error_count > 500 and vlc_error_check == False: vlc_error_check = True if vlc_handle_mode != 0: player.disableChannel() print('Disabled Channel') if len(sys.argv) < 3: print('$ python %s [Recent Scanned M3U] [Used M3U] [--clear]' % sys.argv[0]) print('# M3U can be make by MCTV Playlist Creator') print('# If you want to clean cache, run with --clear.') print('# SKBroadband Multicast Range : 239.192.38.1-239.192.150.254 [49220]') sys.exit(1) class Player(QMainWindow): """A simple Media Player using VLC and Qt """ def __init__(self, master=None): if '--clear' in sys.argv: os.remove('.cache') # Read Data if not os.path.exists('AllChannel.json'): print('Reffering channel info from server.') print('If you want to customize channel data, generate AllChannel.json file by generate_SKB_channels.py.', file=sys.stderr) from generate_SKB_channels import get_channels channels = dict([(_['ServiceId'], _) for _ in get_channels()]) 
else: print('Using AllChannel.json for referring channel info.') with open('AllChannel.json', encoding='UTF8') as f: channels = dict([(_['ServiceId'], _) for _ in json.loads(f.read())]) new_stream_db = read_m3u(sys.argv[1]) old_stream_db = read_m3u(sys.argv[2]) stream_urls = [_['multicast'] for _ in new_stream_db] if os.path.exists('.cache'): with open('.cache', 'rb') as f: cache = pickle.loads(f.read()) else: cache = {} # Checking broken stream global vlc_error_count, vlc_error_check, vlc_handle_mode instance = vlc.Instance("--verbose=-1") instance.log_set(vlc_log_callback, None) mediaplayer = instance.media_player_new() mediaplayer.video_set_scale(0.1) print('Checking broken stream...') for url in tqdm.tqdm(stream_urls): if not url in cache: vlc_error_count = 0 vlc_error_check = False mediaplayer.set_media(instance.media_new(url)) mediaplayer.play() time.sleep(3) cache[url] = vlc_error_check with open('.cache', 'wb') as f: f.write(pickle.dumps(cache)) mediaplayer.stop() self.stream_verify = cache vlc_handle_mode = 1 ############################## playlist = remove_unmapped_channel(old_stream_db, stream_urls) playlist = fill_unmapped_channel(playlist, channels) self.channel_info = channels self.playlist = playlist self.stream_urls = stream_urls import pprint #pprint.pprint(update_db) print(len(old_stream_db), len(playlist), len(new_stream_db), len(channels)) # QT Initialize QMainWindow.__init__(self, master) self.setWindowTitle("Media Player") # creating a basic vlc instance self.instance = vlc.Instance("--verbose=-1") self.instance.log_set(vlc_log_callback, None) # creating an empty vlc media player self.mediaplayer = self.instance.media_player_new() self.createUI() def createUI(self): """Set up the user interface, signals & slots """ self.widget = QWidget(self) self.setCentralWidget(self.widget) # In this widget, the video will be drawn if sys.platform == "darwin": # for MacOS from PyQt5.QtWidgets import QMacCocoaViewContainer self.videoframe = 
QMacCocoaViewContainer(0) else: self.videoframe = QFrame() self.palette = self.videoframe.palette() self.palette.setColor (QPalette.Window, QColor(0,0,0)) self.videoframe.setPalette(self.palette) self.videoframe.setAutoFillBackground(True) self.videoframe.setMinimumWidth(720) self.videoframe.setMinimumHeight(480) #self.hbuttonbox = QHBoxLayout() #self.openchannel = QPushButton("Open Channel Data") #self.hbuttonbox.addWidget(self.openchannel) #self.openchannel.clicked.connect(self.PlayPause) self.hcontrolbox = QHBoxLayout() self.hinfobox = QHBoxLayout() self.icon = QLabel() self.icon.setFixedSize(200, 60) self.icon.setAlignment(Qt.AlignCenter) self.hinfobox.addWidget(self.icon) self.vinfobox = QVBoxLayout() self.ch_name = QLabel("Loading...") font = QFont() font.setBold(True) font.setFamily('Malgun Gothic') font.setPointSize(16) self.ch_name.setFont(font) self.vinfobox.addWidget(self.ch_name) self.hservicebox = QHBoxLayout() self.hservicebox.addWidget(QLabel('Service ID ')) self.service_id = QLabel("[#]") self.hservicebox.addWidget(self.service_id) self.vinfobox.addLayout(self.hservicebox) self.hinfobox.addLayout(self.vinfobox) self.hcontrolbox.addLayout(self.hinfobox) self.hcontrolbox.addStretch(1) self.volumeslider = QSlider(Qt.Horizontal, self) self.volumeslider.setMaximum(100) self.volumeslider.setValue(self.mediaplayer.audio_get_volume()) self.volumeslider.setToolTip("Volume") self.hcontrolbox.addWidget(self.volumeslider) self.volumeslider.valueChanged.connect(self.setVolume) # self.channelbox = QVBoxLayout() self.channellist = QListWidget() self.channellist.setFixedWidth(320) self.channellist.itemClicked.connect(self.selectChannel) self.channelbox.addWidget(self.channellist) self.channelfilter = QLineEdit() self.channelfilter.setFixedWidth(320) self.channelfilter.textChanged.connect(self.find_channel) self.channelbox.addWidget(self.channelfilter) self.streambox = QVBoxLayout() self.streamlist = QListWidget() self.streamlist.setFixedWidth(320) 
self.streamlist.itemClicked.connect(self.selectStream) self.streambox.addWidget(self.streamlist) self.mapbutton = QPushButton("Map") self.mapbutton.clicked.connect(self.map) self.streambox.addWidget(self.mapbutton) self.vboxlayout = QVBoxLayout() self.vboxlayout.addWidget(self.videoframe) self.vboxlayout.addLayout(self.hcontrolbox) self.hboxlayout = QHBoxLayout() self.hboxlayout.addLayout(self.vboxlayout) self.hboxlayout.addLayout(self.channelbox) self.hboxlayout.addLayout(self.streambox) self.widget.setLayout(self.hboxlayout) export = QAction("&Export", self) export.triggered.connect(self.ExportFile) exit = QAction("E&xit", self) exit.triggered.connect(sys.exit) menubar = self.menuBar() filemenu = menubar.addMenu("&File") filemenu.addAction(export) filemenu.addSeparator() filemenu.addAction(exit) self.updatePlaylist() def ExportFile(self): print(os.path.expanduser('~')) #filename = QFileDialog.getSaveFileName(self, "Save File", os.path.expanduser('~'),)[0] filename = QFileDialog.getSaveFileName(self, "Export Playlist File", 'playlist.m3u', "M3U Playlist (*.m3u *.m3u8)",)[0] if not filename: return with open(filename, 'wt', encoding='UTF8') as f: f.write('#EXTM3U\n') for item in [self.channellist.item(_) for _ in range(self.channellist.count())]: if item.checkState(): data = item.data(Qt.UserRole) f.write(f"#EXTINF:-1 tvg-id=\"{data['tvg-id']}\" tvg-logo=\"{data['tvg-logo']}\" tvg-chno=\"{data['tvh-chnum']}\" tvh-chnum=\"{data['tvh-chnum']}\", {data['ch-name']}\n") f.write(f"{data['multicast']}\n") filename = QFileDialog.getSaveFileName(self, "Export Channel File", 'Channel.json', "JSON File (*.json)",)[0] if not filename: return channels = [] for item in [self.channellist.item(_) for _ in range(self.channellist.count())]: if item.checkState(): data = item.data(Qt.UserRole) channels.append(self.channel_info[data['tvg-id']]) with open(filename, 'wt', encoding='UTF8') as f: f.write(json.dumps(channels, indent=2)) def setVolume(self, Volume): """Set the volume """ 
self.mediaplayer.audio_set_volume(Volume) def find_channel(self, text): if text: for item in [self.channellist.item(_) for _ in range(self.channellist.count())]: if item.text().lower().find(text.lower()) >= 0: item.setHidden(False) else: item.setHidden(True) else: for item in [self.channellist.item(_) for _ in range(self.channellist.count())]: item.setHidden(False) def map(self, *args, **kwargs): item = self.channellist.currentItem() item.setCheckState(Qt.Checked) channel = item.data(Qt.UserRole) sitem = self.streamlist.currentItem() radio = self.streamlist.itemWidget(sitem) channel['multicast'] = radio.text() item.setData(Qt.UserRole, channel) self.updateMappedInfo() def playStream(self, stream_url): global vlc_error_count, vlc_error_check vlc_error_count = 0 vlc_error_check = False self.media = self.instance.media_new(stream_url) # put the media in the media player self.mediaplayer.set_media(self.media) # parse the metadata of the file self.media.parse() # set the title of the track as window title self.setWindowTitle(self.media.get_meta(0)) # the media player has to be 'connected' to the QFrame # (otherwise a video would be displayed in it's own window) # this is platform specific! 
# you have to give the id of the QFrame (or similar object) to # vlc, different platforms have different functions for this if sys.platform.startswith('linux'): # for Linux using the X Server self.mediaplayer.set_xwindow(self.videoframe.winId()) elif sys.platform == "win32": # for Windows self.mediaplayer.set_hwnd(self.videoframe.winId()) elif sys.platform == "darwin": # for MacOS self.mediaplayer.set_nsobject(int(self.videoframe.winId())) self.mediaplayer.play() def selectChannel(self, item): global vlc_error_count, vlc_error_check vlc_error_count = 0 vlc_error_check = False channel = item.data(Qt.UserRole) print(channel) if 'multicast' in channel and channel['multicast']: streams = dict([(item.data(Qt.UserRole), item) for item in [self.streamlist.item(_) for _ in range(self.streamlist.count())]]) if channel['multicast'] in streams: self.selectStream(streams[channel['multicast']]) else: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Unchecked) self.ch_name.setText(channel['ch-name']) frame = self.getIcon(channel['tvg-logo']) pixmap = QPixmap() pixmap.loadFromData(frame) self.icon.setPixmap(pixmap.scaled(self.icon.width(), self.icon.height(), Qt.KeepAspectRatio)) self.service_id.setText(f"[{channel['tvg-id']}]") def updateStreamRadioState(self, state): if state: found = False for item in [self.streamlist.item(_) for _ in range(self.streamlist.count())]: radio = self.streamlist.itemWidget(item) if radio.isChecked(): self.channellist.currentItem().setCheckState(Qt.Checked) item.setText("") self.streamlist.setCurrentItem(item) self.streamlist.update(self.streamlist.currentIndex()) self.selectStreamImpl(item) found = True break if not found: item = self.streamlist.item(0) self.streamlist.setCurrentItem(item) self.selectStreamImpl(item) def selectStream(self, item): stream_info = item.data(Qt.UserRole) radio = self.streamlist.itemWidget(item) radio.setChecked(True) def selectStreamImpl(self, item): global vlc_error_count, vlc_error_check vlc_error_count = 
0 vlc_error_check = False radio = self.streamlist.itemWidget(item) url = radio.text() item.setData(Qt.UserRole, url) print('URL from Radio :', url) if url: self.playStream(url) #citem = self.channellist.currentItem() #data = citem.data(Qt.UserRole) #data['multicast'] = url #citem.setData(Qt.UserRole, data) #print('Saved URL :', url) else: pass #self.mediaplayer.pause() #citem = self.channellist.currentItem() #data = citem.data(Qt.UserRole) #data['multicast'] = None #citem.setData(Qt.UserRole, data) #print('Saved URL :', None) self.updateMappedInfo() def getMappedDict(self): mapped_dict = {} for item in [self.channellist.item(_) for _ in range(self.channellist.count())]: data = item.data(Qt.UserRole) if data['multicast']: if data['multicast'] in mapped_dict: mapped_dict[data['multicast']].append(data['ch-name']) else: mapped_dict[data['multicast']] = [data['ch-name']] return mapped_dict def updateMappedInfo(self): print('UpdateMappedInfo') mapped_dict = self.getMappedDict() for item in [self.streamlist.item(_) for _ in range(self.streamlist.count())]: url = item.data(Qt.UserRole) if url == '__BROKEN__': item.setText("*** BROKEN ***") elif url in mapped_dict: item.setText(f"[{','.join(mapped_dict[url])}]" if mapped_dict[url] else "") else: item.setText("") self.streamlist.update(self.streamlist.indexFromItem(item)) def disableChannel(self): self.channellist.currentItem().setCheckState(Qt.Unchecked) self.channellist.update(self.channellist.currentIndex()) data = self.channellist.currentItem().data(Qt.UserRole) data['multicast'] = None self.channellist.currentItem().setData(Qt.UserRole, data) self.streamlist.update(self.streamlist.currentIndex()) self.streamlist.currentItem().setData(Qt.UserRole, '__BROKEN__') self.updateMappedInfo() #app.processEvents() @functools.lru_cache(maxsize=None) def getIcon(self, url): req = request.Request(url) req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0') frame = None for _ in 
range(3): try: resp = request.urlopen(req) frame = resp.read() break except: time.sleep(1) return frame def attachIcon(self, item): channel = item.data(Qt.UserRole) url = channel['tvg-logo'] pixmap = QPixmap() pixmap.loadFromData(self.getIcon(url)) item.setIcon(QIcon(pixmap)) def asyncCacheIcon(self): def asyncThread(listwidget): for item in [listwidget.item(_) for _ in range(listwidget.count())]: channel = item.data(Qt.UserRole) self.attachIcon(item) listwidget.update(listwidget.indexFromItem(item)) threading.Thread(target=asyncThread, args=(self.channellist,), daemon=True).start() def updatePlaylist(self): self.channellist.clear() for channel in self.playlist: item = QListWidgetItem() item.setData(Qt.UserRole, channel) item.setText(f"[{channel['tvh-chnum']}] {channel['ch-name']}") item.setCheckState(Qt.Checked if channel['multicast'] and not self.stream_verify[channel['multicast']] else Qt.Unchecked) self.channellist.addItem(item) item = QListWidgetItem() item.setData(Qt.UserRole, '') self.streamlist.addItem(item) radio = QRadioButton('Unbound Stream URL') radio.toggled.connect(self.updateStreamRadioState) self.streamlist.setItemWidget(item, radio) for url in self.stream_urls: item = QListWidgetItem() item.setTextAlignment(Qt.AlignRight) item.setData(Qt.UserRole, '__BROKEN__' if self.stream_verify[url] else url) self.streamlist.addItem(item) radio = QRadioButton(url) radio.toggled.connect(self.updateStreamRadioState) self.streamlist.setItemWidget(item, radio) self.updateMappedInfo() self.asyncCacheIcon() def delayedSelectChannel(n): time.sleep(n) self.channellist.setCurrentRow(0) self.selectChannel(self.channellist.currentItem()) threading.Thread(target=delayedSelectChannel, args=(3,), daemon=True).start() if __name__ == "__main__": app = QApplication(sys.argv) player = Player() player.show() player.resize(720, 480) sys.exit(app.exec_())
c.py
import os
import time
import threading

# Shell pipeline: append the MAC of every station associated to wlan0
# (one per line) to num.txt.
cmd = "sudo iw dev wlan0 station dump | awk '($1 ~ /Station$/) {s = $2;print s;}' >> num.txt"


def clock():
    """Bounce the wlan0 interface once a minute (forces clients to reassociate)."""
    on = 'ifconfig wlan0 up'
    off = 'ifconfig wlan0 down'
    while 1:
        os.system(off)
        os.system(on)
        time.sleep(60)


def ap2():
    """Alternate the advertised SSID between FreeWifi and SfrWifi forever."""
    free_sfr = True
    while 1:
        # create_ap blocks while the AP is up, so each call is one "round".
        if free_sfr:
            os.system('sudo create_ap -n wlan0 FreeWifi')
            free_sfr = False
        else:
            os.system('sudo create_ap -n wlan0 SfrWifi')
            free_sfr = True


def ap():
    """Start a single access point named 'orange' on wlan0."""
    os.system('sudo create_ap -n wlan0 orange')


def main():
    """Run the AP and log associated-station MACs to address.txt every 5s."""
    ct = threading.Thread(target=clock, args=())
    # ct.start()  # interface bouncing disabled for now
    t = threading.Thread(target=ap, args=())
    t.start()

    while 1:
        date = time.ctime()
        os.system(cmd)
        with open('num.txt', 'r') as f:
            num = f.readlines()
        print(num)
        # Append one "mac,timestamp,date" record per station seen this pass.
        # Mode 'a' creates address.txt on first use, which removes both the
        # original's crash on a missing file and its empty-file branch that
        # reopened the file with 'w' per MAC and truncated earlier records.
        with open('address.txt', 'a') as g:
            for mac in num:
                timestamp = time.time()
                g.write(mac[:-1] + ',' + str(timestamp) + ',' + str(date) + '\n')
        os.system('sudo rm num.txt')
        # Report how many records have been accumulated so far.
        with open('address.txt', 'r') as g:
            count = g.readlines()
        print(len(count))
        time.sleep(5)


if __name__ == '__main__':
    main()
socketserver_forking.py
import os
import socketserver


class ForkingEchoRequestHandler(socketserver.BaseRequestHandler):
    """Echo handler that prefixes the reply with the PID of the handling process."""

    def handle(self):
        # Echo the data back to the client, tagged with this process's PID
        # so the client can observe that a forked child served the request.
        data = self.request.recv(1024)
        cur_pid = os.getpid()
        response = b'%d: %s' % (cur_pid, data)
        self.request.send(response)
        return


class ForkingEchoServer(socketserver.ForkingMixIn,
                        socketserver.TCPServer,
                        ):
    """TCP server that forks one child process per request (POSIX only)."""
    pass


if __name__ == '__main__':
    import socket
    import threading

    address = ('localhost', 0)  # let the kernel assign a port
    server = ForkingEchoServer(address, ForkingEchoRequestHandler)
    ip, port = server.server_address  # what port was assigned?

    t = threading.Thread(target=server.serve_forever)
    # Thread.setDaemon() is deprecated; assign the attribute instead.
    t.daemon = True  # don't hang on exit
    t.start()
    print('Server loop running in process:', os.getpid())

    # Connect to the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))

    # Send the data
    message = 'Hello, world'.encode()
    print('Sending : {!r}'.format(message))
    len_sent = s.send(message)

    # Receive a response
    response = s.recv(1024)
    print('Received: {!r}'.format(response))

    # Clean up
    server.shutdown()
    s.close()
    server.socket.close()
fts_throttler.py
# Copyright 2019 CERN for the benefit of the ATLAS collaboration. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Dilaksun Bavarajan <dilaksun@hotmail.com>, 2019 # - Martin Barisits <martin.barisits@cern.ch>, 2019 # - Brandon White <bjwhite@fnal.gov>, 2019-2020 # - Thomas Beermann <thomas.beermann@cern.ch>, 2020 # # PY3K COMPATIBLE """ Conveyor FTS Throttler is a daemon that will configure a fts storage's transfer settings depending on how many time out errors occur at the storage. If a storage has substantial amount of transer failures due to time outs, it is usually due to bad connectivity and the amount of failures can be alleviated by limiting the transfer settings of FTS transfers on the particular fts storage. 
""" from __future__ import division import logging import sys import threading import os import socket import time import traceback import json import datetime import requests from rucio.common.config import config_get from rucio.core import heartbeat from rucio.transfertool.fts3 import FTS3Transfertool logging.basicConfig(stream=sys.stdout, level=getattr(logging, config_get('common', 'loglevel', raise_exception=False, default='DEBUG').upper()), format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s') graceful_stop = threading.Event() class FTSThrottler(object): def __init__(self, cycle_interval=3600): self.__cycle_interval = cycle_interval def tune(self): """ tune the configuration settings """ result = self.request_timeout_data() if result is not None: try: cycle_file = config_get('conveyor', 'fts_throttler_cycle') except Exception: logging.warn('could not get the cycle file, cannot perform tuning for this cycle without cycle file, returning') return try: tuning_ratio = config_get('conveyor', 'fts_throttler_tuning_ratio') except Exception: logging.warn('could not get the tuning ratio from config, returning') return rses = result['aggregations']['rse']['buckets'] cycle_info_dict = {'storages': []} for rse in rses: # if a rse has a failure ratio above the tuning ratio (percentage) we tune it. if rse['failure_ratio'].get('value') > int(tuning_ratio): # rse_info holds the storage name(0) and FTS-host server(1) rse_info = rse['key'].split() # Tapes might have other reasons for timeouts which should be treated differently, therefor they are ignored and not tuned for now. if rse['storage_type']['hits']['hits'][0]['_source']['payload']['dst-type'] == 'TAPE': logging.info('%s is a tape storage type, it will not be tuned', rse_info[0]) continue # instantiate transfertool for access to get_se_config and set_se_config. 
t = FTS3Transfertool(rse_info[1]) # extract FTS storage from dst-url tmp = rse['destination']['hits']['hits'][0]['_source']['payload']['dst-url'].split(':', 2) url = tmp[0] + ':' + tmp[1] n = rse['failure_ratio'].get('value') logging.info(' RSE ' + rse_info[0] + ' on FTS host ' + rse_info[1] + ' has failure ratio ' + str(rse['failure_ratio'].get('value')) + ' on storage ' + url) # NOQA: W503 try: se = t.get_se_config(url) logging.info('storage settings: %s', se) except KeyError: logging.warn('configuration for storage element was not found, config will be set from default values') # all FTS Host servers have a default reference storage named '*' that holds the default values for all storages that arent listed yet. default_storage = t.get_se_config('*') t.set_se_config(url, inbound_max_active=int((100 / (100 + n)) * default_storage['se_info']['inbound_max_active']), outbound_max_active=int((100 / (100 + n)) * default_storage['se_info']['outbound_max_active'])) logging.info(url + 'inbound_max_active changed from ' + str(default_storage['se_info']['inbound_max_active']) + ' to ' + str(int((100 / (100 + n)) * default_storage['se_info']['inbound_max_active'])) + ', outbound_max_active changed from ' + str(default_storage['se_info']['outbound_max_active']) + ' to ' + str(int((100 / (100 + n)) * default_storage['se_info']['outbound_max_active']))) # NOQA: W503 # cycle_info_dict is used to write changes down to the cycle file. 
cycle_info_dict['storages'].append({'storage': url, 'inbound_max_active': default_storage['se_info']['inbound_max_active'], 'outbound_max_active': default_storage['se_info']['outbound_max_active'], 'failure_ratio': n, 'tuned_inbound_max_active': int((100 / (100 + n)) * default_storage['se_info']['inbound_max_active']), 'tuned_outbound_max_active': int((100 / (100 + n)) * default_storage['se_info']['outbound_max_active']), 'fts-host': rse_info[1], 'time': str(datetime.datetime.now())}) continue except Exception as error: logging.warn('an error occured when trying to get the storage configuration') logging.warn(str(error)) continue # Even though we could read the config, we still need to know if the important attributes are empty. if se['se_info']['inbound_max_active'] is None: try: default_storage = t.get_se_config('*') except Exception: raise Exception('Could not retrieve the default storage information') ima = default_storage['se_info']['inbound_max_active'] else: ima = se['se_info']['inbound_max_active'] if se['se_info']['outbound_max_active'] is None: try: default_storage = t.get_se_config('*') except Exception: raise Exception('Could not retrieve the default storage information') oma = default_storage['se_info']['outbound_max_active'] else: oma = se['se_info']['outbound_max_active'] # append existing information to dict and write to file. cycle_info_dict['storages'].append({'storage': url, 'inbound_max_active': ima, 'outbound_max_active': oma, 'failure_ratio': n, 'tuned_inbound_max_active': int((100 / (100 + n)) * ima), 'tuned_outbound_max_active': int((100 / (100 + n)) * oma), 'fts-host': rse_info[1], 'time': str(datetime.datetime.now())}) # tune down the configuration of a storage relative to the failure ratio(n) and existing configuration. 
t.set_se_config(url, inbound_max_active=int((100 / (100 + n)) * ima), outbound_max_active=int((100 / (100 + n)) * oma)) logging.info(url + 'inbound_max_active changed from ' + str(ima) + ' to ' + str(int((100 / (100 + n)) * ima)) + ', outbound_max_active changed from ' + str(oma) + ' to ' + str(int((100 / (100 + n)) * oma))) # NOQA: W503 if cycle_info_dict['storages'] == []: logging.info('no storages are failing significantly due to timeout errors, therefor no tuning happened.') with open(cycle_file, 'w') as outfile: json.dump(cycle_info_dict, outfile) else: logging.warn('Could not detect any storages with sufficient failure ratio for tuning, trying again next cycle') return def revert(self): """ Reverts the changes from previous tuning, this is to avoid recursively tuning with no reference point, the manually configured attributes or the default attributes will stay as the reference point Before each cycle, all tunings will be reverted to the original reference point. :returns: bool indicating if revert was successful or not. 
""" try: cycle_file = config_get('conveyor', 'fts_throttler_cycle') except Exception: logging.warn('could not get the cycle file, cannot revert cycle changes, therefor no tuning either') return False with open(cycle_file) as cycle_info: cycle_info_dict = json.load(cycle_info) storages = cycle_info_dict['storages'] for storage in storages: t = FTS3Transfertool(storage['fts-host']) logging.info('storage information: %s', storage) t.set_se_config(storage['storage'], inbound_max_active=storage['inbound_max_active'], outbound_max_active=storage['outbound_max_active']) logging.info('on storage ' + storage['storage'] + ' outbound_max_active reverted from ' + str(storage['tuned_outbound_max_active']) + ' to ' + str(storage['outbound_max_active']) # NOQA: W503 + ', inbound_max_active reverted from ' + str(storage['tuned_inbound_max_active']) + ' to ' + str(storage['inbound_max_active'])) # NOQA: W503 logging.info('revert performed') return True def request_timeout_data(self, destination=True, last_hours=1, transfer_successes_lower_boundary=20, transfer_timeouts_lower_boundary=20, kserver='http://atlas-kibana.mwt2.org:9200/rucio-events-*/_search'): """ requests timeout data using elastic search :returns: JSON result of the elastic search query. :param destination: bool that decides whether to query for source rse's(false) or destination rse's(true) :param last_hours: integer to choose how many hours back we want to query from :param transfer_successes_lower_boundary: integer for the lower boundary for transfers succeeded on a rse. :param transfer_timeouts_lower_boundary: integer for the lower boundary of timeout events that happened on a rse. 
""" params_dict = { 'query': { 'bool': { 'must': [{ 'range': { '@timestamp': { 'gte': 'now-' + str(last_hours) + 'h', 'lte': 'now', 'format': 'epoch_millis' } } }] } }, 'size': 0, 'aggs': { 'rse': { 'terms': { }, 'aggs': { 'destination': { 'top_hits': { '_source': { }, 'size': 1 } }, 'storage_type': { 'top_hits': { '_source': { }, 'size': 1 } }, 'transfers_failed_timeout': { 'filter': { 'bool': { 'should': [ # this is the list of the errors so far that are taken into consideration for the ratio calculation, add more if needed. {'match': {'payload.reason': {'query': 'TRANSFER [110] TRANSFER Operation timed out', 'operator': 'and'}}}, {'match': {'payload.reason': {'query': ('TRANSFER [110] TRANSFER Transfer canceled because the gsiftp performance marker ' 'timeout of 360 seconds has been exceeded, or all performance ' 'markers during that period indicated zero bytes transferred'), 'operator': 'and'}}}, {'match': {'payload.reason': {'query': ('SOURCE [70] globus_ftp_client: the server responded with an error 421 Service busy:' ' Connection limit exceeded. Please try again later. 
Closing control connection.'), 'operator': 'and'}}}, ], 'minimum_should_match': 1 } } }, 'timeout_bucket_filter': { 'bucket_selector': { 'buckets_path': { 'timeoutCount': 'transfers_failed_timeout>_count' }, 'script': 'params.timeoutCount > ' + str(transfer_timeouts_lower_boundary) } }, 'transfers_succeeded': { 'filter': { 'bool': { 'must': [ {'term': {'type': 'transfer-done'}} ] } } }, 'success_bucket_filter': { 'bucket_selector': { 'buckets_path': { 'transferSuccessCount': 'transfers_succeeded>_count' }, 'script': 'params.transferSuccessCount > ' + str(transfer_successes_lower_boundary) } }, 'failure_ratio': { 'bucket_script': { 'buckets_path': { 'transfersFailedTimeout': 'transfers_failed_timeout>_count', 'transfersSucceeded': 'transfers_succeeded>_count' }, 'script': 'params.transfersFailedTimeout / params.transfersSucceeded * 100' } } } } } } # if destination is true, we request data for destination RSE's, else we request source RSE's if destination: params_dict['aggs']['rse']['terms'] = {'script': """(doc['payload.dst-rse'].empty ? '' : doc['payload.dst-rse'].value) + ' ' + (doc['payload.transfer-endpoint'].empty ? '' : doc['payload.transfer-endpoint'].value)""", 'size': 1000} params_dict['aggs']['rse']['aggs']['destination']['top_hits']['_source'] = {'include': ['payload.dst-url']} params_dict['aggs']['rse']['aggs']['storage_type']['top_hits']['_source'] = {'include': ['payload.dst-type']} else: params_dict['aggs']['rse']['terms'] = {'script': """(doc['payload.src-rse'].empty ? '' : doc['payload.src-rse'].value) + ' ' + (doc['payload.transfer-endpoint'].empty ? 
'' : doc['payload.transfer-endpoint'].value)""", 'size': 1000} params_dict['aggs']['rse']['aggs']['destination']['top_hits']['_source'] = {'include': ['payload.src-url']} params_dict['aggs']['rse']['aggs']['storage_type']['top_hits']['_source'] = {'include': ['payload.src-type']} params_str = json.dumps(params_dict) try: result = requests.get(kserver, data=params_str, headers={'Content-Type': 'application/json'}, timeout=None) except Exception: logging.warn('could not retrieve transfer failure data from %s - %s', kserver, str(traceback.format_exc())) if result and result.status_code == 200: return result.json() raise Exception('could not get result from %s, status code returned : %s', kserver, result.status_code if result else None) def testread(self, tuning_ratio=25): """ Read the failure ratio of storages without tuning :returns: filtered JSON response from Elastic search. :param tuning_ratio: integer lower bound for what failing storages you want to read. """ result = self.request_timeout_data() if result is not None: rses = result['aggregations']['rse']['buckets'] for rse in rses: # if a rse has a failure ratio above the tuning ratio we read it. 
if rse['failure_ratio'].get('value') > tuning_ratio: # rse_info holds the storage name(0) and FTS-host server(1) rse_info = rse['key'].split() t = FTS3Transfertool(rse_info[1]) # extract FTS storage from dst-url tmp = rse['destination']['hits']['hits'][0]['_source']['payload']['dst-url'].split(':', 2) url = tmp[0] + ':' + tmp[1] logging.info('\033[91m RSE \033[0m' + rse_info[0] + '\033[91m on FTS host \033[0m' + rse_info[1] + '\033[91m has failure ratio \033[0m' + str(rse['failure_ratio'].get('value')) + '\033[91m on storage \033[0m' + url) try: se = t.get_se_config(url) logging.info('storage settings: %s', se) except KeyError: logging.warn('configuration for storage element was not found') except Exception as error: logging.warn('an error occured when trying to get the storage configuration') logging.warn(str(error)) continue return rses else: logging.warn('Could not retrieve timeout data with elastic search, trying again next cycle') def fts_throttler(once=False, cycle_interval=3600): """ Main loop to automatically configure FTS storages. """ graceful_stop.clear() logging.info('FTS Throttler starting') executable = 'conveyor-fts-throttler' hostname = socket.getfqdn() pid = os.getpid() hb_thread = threading.current_thread() heartbeat.sanity_check(executable=executable, hostname=hostname) heart_beat = heartbeat.live(executable, hostname, pid, hb_thread) prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads']) current_time = time.time() graceful_stop.wait(10) running_instance = False while not graceful_stop.is_set(): heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600) if heart_beat['nr_threads'] < 2: running_instance = True # this loop cannot be entered by more than one instance at a time. 
while not graceful_stop.is_set(): try: heart_beat = heartbeat.live(executable, hostname, pid, hb_thread, older_than=3600) prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads']) logging.info(prepend_str + "fts_throttler start cycle") if FTSThrottler().revert(): logging.info('revert was successful, now tuning') FTSThrottler().tune() logging.info('Tuning finished for this cycle') else: logging.warn('could not revert, cannot tune unless revert has been done, will try again next cycle.') if once: break if time.time() < current_time + cycle_interval: graceful_stop.wait(int((current_time + cycle_interval) - time.time())) current_time = time.time() except Exception: logging.critical(prepend_str + 'fts_throttler crashed %s' % (traceback.format_exc())) if once: break else: prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'], heart_beat['nr_threads']) logging.info(prepend_str + 'another fts_throttler instance already exists. will wait') if time.time() < current_time + cycle_interval: graceful_stop.wait(int((current_time + cycle_interval) - time.time())) current_time = time.time() logging.info(prepend_str + 'Throttler - graceful stop requested') # before we stop, try to revert, but only if this instance was running the cycles. # ! If the cycle info file information is shared between instances, then this implementation must be changed ! if running_instance: try: FTSThrottler().revert() except Exception: logging.warn('could not revert changes before stopping') heartbeat.die(executable, hostname, pid, hb_thread) logging.info(prepend_str + 'Throttler - graceful stop done') def stop(signum=None, frame=None): """ Graceful exit. """ graceful_stop.set() def run(once=False, cycle_interval=3600): """ Starts up the conveyer fts throttler thread. 
""" logging.info('starting throttler thread') fts_throttler_thread = threading.Thread(target=fts_throttler, kwargs={'once': once, 'cycle_interval': cycle_interval}) fts_throttler_thread.start() logging.info('waiting for interrupts') # Interruptible joins require a timeout. fts_throttler_thread.join(timeout=3.14)
test_router.py
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
"""Unit tests for the rapidsms Router: app/backend registration, lookup,
and start/stop lifecycle propagation."""

import time
import threading
from nose.tools import assert_equals, assert_raises
from ..backends.base import BackendBase
from ..apps.base import AppBase
from ..router import Router


def test_router_finds_apps():
    """add_app imports an installed app module; get_app returns its instance."""
    router = Router()
    router.add_app("rapidsms.contrib.default")
    from rapidsms.contrib.default.app import App

    assert_equals(len(router.apps), 1)
    app = router.get_app("rapidsms.contrib.default")
    assert_equals(type(app), App)


def test_router_returns_none_on_invalid_apps():
    """get_app returns None (rather than raising) for unimportable modules."""
    assert_equals(Router().get_app("not.a.valid.app"), None)


def test_router_raises_on_uninstalled_apps():
    """get_app raises KeyError for a valid app that was never added."""
    assert_raises(KeyError, Router().get_app, "rapidsms.contrib.default")


def test_router_starts_and_stops_apps_and_backends():
    """Router.start/stop must call start/stop on every app and backend."""

    class MockApp(AppBase):
        def start(self):
            self.started = True

        def stop(self):
            self.stopped = True

    class MockBackend(BackendBase):
        def start(self):
            self.started = True
            BackendBase.start(self)

        def stop(self):
            self.stopped = True
            BackendBase.stop(self)

    router = Router()
    app = MockApp(router)
    router.apps.append(app)
    backend = MockBackend(router, "mock")
    router.backends["mock"] = backend

    # Nothing has been started yet, so the marker attributes set by the
    # mock start/stop hooks must not exist.  (PEP 8: never compare a
    # boolean expression to True/False with ``==`` -- use the value itself.)
    assert not hasattr(app, 'started')
    assert not hasattr(app, 'stopped')
    assert not hasattr(backend, 'started')
    assert not hasattr(backend, 'stopped')

    # Router.start blocks, so run it in a daemon thread and test it
    # asynchronously from here.
    worker = threading.Thread(target=router.start)
    worker.daemon = True
    worker.start()

    # wait until the router has started.
    while not router.running:
        time.sleep(0.1)

    assert_equals(app.started, True)
    assert_equals(backend.started, True)
    assert not hasattr(app, 'stopped')
    assert not hasattr(backend, 'stopped')

    # stop the router, and wait until it has fully shut down.
    router.stop()
    worker.join()

    assert_equals(app.started, True)
    assert_equals(app.stopped, True)
    assert_equals(backend.started, True)
    assert_equals(backend.stopped, True)


def test_router_finds_backends():
    """add_backend instantiates the backend and registers it under its name."""
    router = Router()
    test_backend = "rapidsms.backends.base"
    backend = router.add_backend("mock", test_backend)
    assert_equals(router.backends["mock"], backend)
    assert_equals(len(router.backends), 1)


def test_router_downcases_backend_configs():
    """Backend config keys are lower-cased; the original-case keys vanish."""
    router = Router()
    test_backend = "rapidsms.backends.base"
    test_conf = {"a": 1, "B": 2, "Cc": 3}

    backend = router.add_backend("mock", test_backend, test_conf)
    assert_equals(len(backend._config), 3)
    assert_equals("a" in backend._config, True)
    assert_equals("b" in backend._config, True)
    assert_equals("cc" in backend._config, True)
    assert_equals("B" in backend._config, False)
    assert_equals("Cc" in backend._config, False)
main.py
# -*- coding:utf-8 -*-
# Auto check-in script for the Chaoxing (超星/学习通) learning platform:
# during configured class hours it polls each course's activity list and
# submits the check-in request automatically.
import multiprocessing
import random
from datetime import datetime, time
from time import sleep
from time import time as times
import os
from urllib import parse
import json

import requests

# !!! Fill in the parameters below.
# Account user name and password, used to log in.
username = ''
password = ''
# uid is the platform user id.
uid = ''
# Check-in parameters: latitude/longitude and the student's real name.
latitude = '-1'
longitude = '-1'
name = ''
# The following two values normally do not need to be changed.
clientip = ''
signuseragent = ''
# objectId needed for photo check-in; the concrete id can be obtained by
# capturing network traffic.
objectId = 'a58cb2acedf5fa10d2ad2fc421fb7d30'
# Start time of each class session.
start_time = {
    time(8, 0),
    time(10, 0),
    time(13, 30),
    time(15, 30),
}
# Listening duration in minutes; e.g. 20 means: listen for 20 minutes
# from the start of each class.
listen_time = 20
# Weekdays with classes; note the platform counts Sunday as 0.
start_day = [1, 2, 3, 4, 5]

# Nothing below this point needs to be modified.
useragent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_4_1 like Mac OS X) ' \
            'AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 ' \
            'ChaoXingStudy/ChaoXingStudy_3_4.4.1_ios_phone_202004111750_39 (@Kalimdor)_4375872153618237766 ' \
            'ChaoXingStudy/ChaoXingStudy_3_4.4.1_ios_phone_202004111750_39 (@Kalimdor)_4375872153618237766'
encode_name = parse.quote(name)
cookie_filename = 'chaoxing_cookies'
header = {
    'Cookie': '',
    'User-Agent': useragent
}


def myprint(string):
    """Print *string* prefixed with a 'YYYY-MM-DD HH:MM:SS' timestamp."""
    print(datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S') + ' ' + string)


def getCookies():
    """Log in with username/password, store the session cookie string in the
    shared request ``header`` and cache it to ``cookie_filename``."""
    myprint('正在登录,获取新Cookie')
    if username and password:
        url = 'https://passport2-api.chaoxing.com/v11/loginregister'
        payload = {
            'uname': username,
            'code': password
        }
        # Post the credentials and collect the session cookies.
        cookie_jar = requests.session().post(url=url, data=payload, headers={'User-Agent': useragent}).cookies
        cookie_dict = requests.utils.dict_from_cookiejar(cookie_jar)
        cookie_str = ''
        for key in cookie_dict:
            cookie_str += key + '=' + cookie_dict[key] + '; '
        header['cookie'] = cookie_str
        # Cache the cookie string so later runs can skip the login step.
        with open(cookie_filename, 'w', encoding='utf-8')as file:
            file.write(cookie_str)
        myprint('获取Cookie成功')
    else:
        myprint('plz edit username and password in this python file')


def loadCookie():
    """Load a previously cached cookie from disk, or log in fresh."""
    if os.path.exists(cookie_filename):
        with open(cookie_filename, 'r', encoding='utf-8') as f:
            data = f.read().strip()
            if data:
                header['cookie'] = data
            else:
                getCookies()
    else:
        getCookies()


# Shared state between the scheduler (listen) and the worker process.
should_run = False   # True while a listening window is active
coursedata = []      # course descriptors fetched from the platform
activates = []       # activity ids already handled (signed or failed)
timestamp: float = 0  # epoch seconds when the current window opened


def listenThread():
    """Worker-process entry point: fetch the course list and poll each
    course's activity list for pending check-ins while ``should_run`` holds.

    NOTE(review): the helper functions below are (re)defined on every pass of
    the outer loop and close over module-level state; only ``backClassData()``
    at the bottom of the loop body actually drives the work.
    """
    global should_run
    while should_run:

        def backClassData():
            # Fetch the user's course list, retrying (and re-logging-in)
            # until a valid JSON payload is obtained.
            cdata = {}
            url = 'http://mooc1-api.chaoxing.com/mycourse/backclazzdata?view=json&rss=1'
            while not cdata:
                res = requests.get(url, headers=header)
                res_data = res.text
                if '请重新登录' in res_data:
                    # Server asked us to log in again.
                    getCookies()
                    continue
                try:
                    cdata = json.loads(res_data)
                except json.JSONDecodeError:
                    myprint('Cookie已失效,将重新获取')
                    getCookies()
                    continue
                if not cdata:
                    getCookies()
                    continue
                if cdata['result'] != 1:
                    # Course list fetch failed; wait and retry.
                    myprint('课程列表获取失败')
                    sleep(10)
                    continue
            # Collect id/name/class-id for every real course entry.
            for item in cdata['channelList']:
                if 'course' not in item['content']:
                    continue
                pushdata = {'courseid': item['content']['course']['data'][0]['id'],
                            'name': item['content']['course']['data'][0]['name'],
                            'imageurl': item['content']['course']['data'][0]['imageurl'],
                            'classid': item['content']['id']}
                coursedata.append(pushdata)
            myprint('课程获取成功\n')
            printCourseData()

        def printCourseData():
            # List the fetched courses, then start the signing loop.
            global coursedata
            for index, item in enumerate(coursedata):
                print(str(index + 1) + "." + item['name'])
            startSign()

        def taskActiveList(courseId, classId):
            # Query one course's activity list and sign any open check-in
            # (activeType == 2, status == 1) that was not handled before.
            url = 'https://mobilelearn.chaoxing.com/ppt/activeAPI/taskactivelist?' \
                  'courseId=' + str(courseId) + \
                  '&classId=' + str(classId) + \
                  '&uid=' + uid
            res = requests.get(url, headers=header)
            data_json = json.loads(res.text)
            activeList = data_json['activeList']
            for item in activeList:
                if 'nameTwo' not in item:
                    continue
                if item['activeType'] == 2 and item['status'] == 1:
                    signurl = item['url']
                    aid = getVar(signurl)
                    if aid not in activates:
                        myprint('待签到活动 名称:%s 状态:%s 时间:%s ' % (item['nameOne'], item['nameTwo'], item['nameFour']))
                        sign(aid, uid, courseId)

        def getVar(url):
            # Extract the 'activePrimaryId' query parameter from a sign URL.
            var1 = url.split('&')
            for var in var1:
                var2 = var.split('=')
                if var2[0] == 'activePrimaryId':
                    return var2[1]
            return 'notfound'

        def sign(aid, uid, courseid):
            # Submit the actual check-in request for activity *aid*.
            global should_run, activates
            url = 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax?' \
                  'activeId=' + aid + \
                  '&uid=' + uid + \
                  '&clientip=' + clientip + \
                  '&useragent=' + signuseragent +\
                  '&latitude=' + latitude + \
                  '&longitude=' + longitude + \
                  '&appType=15' + \
                  '&fid=2378' + \
                  '&objectId=' + objectId + \
                  '&name=' + encode_name
            res = requests.get(url, headers=header)
            # Look up the display name of the course being signed.
            course_name = ''
            for item in coursedata:
                if item['courseid'] == courseid:
                    course_name = item['name']
            if res.text == 'success':
                # Signed successfully; end this listening session.
                myprint(course_name + ': 签到成功!')
                activates.append(aid)
                sleep(60)
                should_run = False
            elif res.text == '您已签到过了':
                # Already signed earlier; also end the session.
                myprint(course_name + ': 您已签到过了')
                activates.append(aid)
                sleep(60)
                should_run = False
            else:
                # Failed; remember the id so we do not retry it forever.
                myprint(course_name + ': 签到失败')
                activates.append(aid)

        def startSign():
            # Poll every known course until a sign succeeds or the
            # listening window closes.
            global should_run
            while should_run:
                print('\n')
                for item in coursedata:
                    myprint('正在监听: ' + str(item['name']))
                    taskActiveList(item['courseid'], item['classid'])
                    sleep(3.7)
                    if not should_run:
                        break
                if should_run:
                    # Random delay between polling rounds to look less
                    # like a bot.
                    sleep(random.randint(37, 88))
            myprint('任务结束')

        backClassData()


def listen():
    """Scheduler loop: open a listening window at each configured class
    start time on class days, spawning/terminating the worker process."""
    myprint('主程序启动')
    child_process = None
    global should_run, timestamp
    loadCookie()
    while True:
        current_time = datetime.now().strftime('%H:%M')
        weekday = datetime.now().strftime('%w')
        if int(weekday) in start_day:
            for item in start_time:
                # str(time) is 'HH:MM:SS'; drop the seconds for comparison.
                if str(item)[:-3] == current_time:
                    timestamp = times()
                    should_run = True
        # Close the window once listen_time minutes have elapsed.
        if should_run and times() - timestamp > 60 * listen_time:
            should_run = False
        if should_run and child_process is None:
            myprint('监听开始\n')
            child_process = multiprocessing.Process(target=listenThread)
            child_process.start()
        if not should_run and child_process is not None:
            myprint('监听结束\n')
            child_process.terminate()
            child_process.join()
            child_process = None
        sleep(10)


if __name__ == '__main__':
    try:
        listen()
    except KeyboardInterrupt:
        myprint('主动停止运行')
test_interrupt.py
#!/usr/bin/env python3
"""Integration test for the RMF fleet adapter: submits a direct patrol task,
interrupts and resumes it mid-execution, then verifies the robot still
completes the expected route."""

import rclpy
import time
import json

from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node

import rmf_adapter as adpt
import rmf_adapter.vehicletraits as traits
import rmf_adapter.geometry as geometry
import rmf_adapter.graph as graph
import rmf_adapter.battery as battery
import rmf_adapter.plan as plan
import rmf_adapter.type as Type

import asyncio
import threading

from itertools import groupby

from test_utils import MockRobotCommand
from test_utils import task_state_observer_fn

from functools import partial

test_task_id = "patrol.direct.001"  # aka task_id
map_name = "test_map"
fleet_name = "test_fleet"
rmf_server_uri = "ws://localhost:7878"  # random port

start_name = "start_wp"  # 7
finish_name = "finish_wp"  # 10
loop_count = 2


def main():
    """Run the full direct-request + interrupt/resume scenario end to end."""
    # INIT RCL ================================================================
    rclpy.init()
    try:
        adpt.init_rclcpp()
    except RuntimeError:
        # Continue if it is already initialized
        pass

    # INIT GRAPH ==============================================================
    # Copied from test_loop.py
    test_graph = graph.Graph()

    test_graph.add_waypoint(map_name, [0.0, -10.0])  # 0
    test_graph.add_waypoint(map_name, [0.0, -5.0])  # 1
    test_graph.add_waypoint(map_name, [5.0, -5.0])  # 2
    test_graph.add_waypoint(map_name, [-10.0, 0])  # 3
    test_graph.add_waypoint(map_name, [-5.0, 0.0])  # 4
    test_graph.add_waypoint(map_name, [0.0, 0.0])  # 5
    test_graph.add_waypoint(map_name, [5.0, 0.0])  # 6
    test_graph.add_waypoint(map_name, [10.0, 0.0])  # 7
    test_graph.add_waypoint(map_name, [0.0, 5.0])  # 8
    test_graph.add_waypoint(map_name, [5.0, 5.0])  # 9
    # Waypoint 10 doubles as a holding point and the robot's charger.
    test_graph.add_waypoint(map_name, [0.0, 10.0]).set_holding_point(
        True).set_charger(True)  # 10

    # Trailing comments give the lane indices created by each call
    # (a bidirectional lane adds two directed lanes).
    test_graph.add_bidir_lane(0, 1)  # 0 1
    test_graph.add_bidir_lane(1, 2)  # 2 3
    test_graph.add_bidir_lane(1, 5)  # 4 5
    test_graph.add_bidir_lane(2, 6)  # 6 7
    test_graph.add_bidir_lane(3, 4)  # 8 9
    test_graph.add_bidir_lane(4, 5)  # 10 11
    test_graph.add_bidir_lane(5, 6)  # 12 13
    test_graph.add_dock_lane(6, 7, "A")  # 14 15
    test_graph.add_bidir_lane(5, 8)  # 16 17
    test_graph.add_bidir_lane(6, 9)  # 18 19
    test_graph.add_bidir_lane(8, 9)  # 20 21
    test_graph.add_dock_lane(8, 10, "B")  # 22 23

    # Named keys used by the patrol task description below.
    test_graph.add_key(start_name, 7)
    test_graph.add_key(finish_name, 10)

    # INIT FLEET ==============================================================
    profile = traits.Profile(geometry.make_final_convex_circle(1.0))
    robot_traits = traits.VehicleTraits(
        linear=traits.Limits(0.7, 0.3),
        angular=traits.Limits(1.0, 0.45),
        profile=profile
    )

    adapter = adpt.MockAdapter("TestInterruptAdapter")
    fleet = adapter.add_fleet(
        fleet_name, robot_traits, test_graph, rmf_server_uri
    )

    def patrol_req_cb(json_desc):
        # Accept every incoming patrol request unconditionally.
        confirmation = adpt.fleet_update_handle.Confirmation()
        confirmation.accept()
        print(f" accepted patrol req: {json_desc}")
        return confirmation

    # Callback when a patrol request is received
    fleet.consider_patrol_requests(
        patrol_req_cb)

    # Set fleet battery profile
    battery_sys = battery.BatterySystem.make(24.0, 40.0, 8.8)
    mech_sys = battery.MechanicalSystem.make(70.0, 40.0, 0.22)
    motion_sink = battery.SimpleMotionPowerSink(battery_sys, mech_sys)
    ambient_power_sys = battery.PowerSystem.make(20.0)
    ambient_sink = battery.SimpleDevicePowerSink(
        battery_sys, ambient_power_sys)
    tool_power_sys = battery.PowerSystem.make(10.0)
    tool_sink = battery.SimpleDevicePowerSink(battery_sys, tool_power_sys)

    b_success = fleet.set_task_planner_params(
        battery_sys, motion_sink, ambient_sink, tool_sink, 0.2, 1.0, False)
    assert b_success, "set task planner params failed"

    cmd_node = Node("RobotCommandHandle")

    # Robot starts at waypoint 0 with orientation 0.0.
    start = plan.Start(adapter.now(), 0, 0.0)

    def updater_inserter(handle_obj, updater):
        # Capture the RobotUpdateHandle on the command object once the
        # robot is registered, and report a full battery.
        updater.update_battery_soc(1.0)
        handle_obj.updater = updater

    robot_cmd = MockRobotCommand(cmd_node, test_graph)

    fleet.add_robot(
        robot_cmd, "T0", profile, [start],
        partial(updater_inserter, robot_cmd)
    )

    # FINAL PREP ==============================================================
    rclpy_executor = SingleThreadedExecutor()
    rclpy_executor.add_node(cmd_node)

    # GO! =====================================================================
    adapter.start()
    print("\n")
    print("# SENDING SINGLE DIRECT REQUEST ####################################")

    # INIT TASK STATE OBSERVER ==============================================
    # The observer thread resolves `fut` when the target task completes.
    print("spawn observer thread")
    fut = asyncio.Future()
    observer_th = threading.Thread(
        target=task_state_observer_fn, args=(fut, test_task_id))
    observer_th.start()

    # TODO(YL): import rmf_api_msgs task schema pydantic here
    # Create a task to dispatch
    task_json_obj = {
        "category": "patrol",
        "unix_millis_earliest_start_time": 0,
        "description": {
            "places": [start_name, finish_name],
            "rounds": loop_count
        }
    }

    def receive_response(response):
        # Fail loudly if the direct request was rejected.
        if not response['success']:
            print(f'Received failure response:\n{response}')
        assert response['success']

    print(' -- About to submit direct task request')
    robot_cmd.updater.submit_direct_request(
        task_json_obj, test_task_id, receive_response
    )
    print(' -- Submitted direct task request')

    print('About to sleep...')
    time.sleep(1)
    print('...Done sleeping')

    # check observer completion and timeout
    start_time = time.time()
    # Let the task begin executing before interrupting it.
    for _ in range(5):
        rclpy_executor.spin_once(1)
        time.sleep(0.2)

    is_interrupted = False

    def on_interrupted():
        nonlocal is_interrupted
        is_interrupted = True

    # Interrupt the running task; on_interrupted fires once the robot
    # acknowledges the interruption.
    interruption = robot_cmd.updater.interrupt(
        ['test'], on_interrupted
    )

    for _ in range(5):
        rclpy_executor.spin_once(1)
        time.sleep(0.2)

    assert is_interrupted, "on_interrupted callback did not get triggered"

    # Resume the interrupted task and wait for overall completion.
    interruption.resume(['verified'])

    for _ in range(1000):
        if ((time.time() - start_time) > 15):
            if fut.done():
                break
            fut.set_result(True)  # Properly end observer thread
            assert False, "Timeout, target task is not Completed."
        if fut.done():
            print("Tasks Complete.")
            break
        rclpy_executor.spin_once(1)
        time.sleep(0.2)

    print("\n== DEBUG TASK REPORT ==")
    print("Visited waypoints:", robot_cmd.visited_waypoints)

    # Filter the wps, this will remove consecutive duplicated waypoints
    filtered_visited_wps = [x[0] for x in groupby(robot_cmd.visited_waypoints)]
    expected_route = [0, 5, 6, 7, 6, 5, 8, 10, 8, 5, 6, 7, 6, 5, 8, 10]
    assert filtered_visited_wps == expected_route, (
        f"Robot did not take the expected route")

    cmd_node.destroy_node()
    rclpy_executor.shutdown()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
i_to_r.py
"""Defines blocking function inditoredis: Receives XML data from indiserver on port 7624 and stores in redis. Reads data published via redis, and outputs to port 7624 and indiserver. """ import os, sys, collections, threading, asyncio, pathlib from time import sleep from datetime import datetime import xml.etree.ElementTree as ET from . import toindi, fromindi, tools REDIS_AVAILABLE = True try: import redis except: REDIS_AVAILABLE = False # _STARTTAGS is a tuple of ( b'<defTextVector', ... ) data received will be tested to start with such a starttag _STARTTAGS = tuple(b'<' + tag for tag in fromindi.TAGS) # _ENDTAGS is a tuple of ( b'</defTextVector>', ... ) data received will be tested to end with such an endtag _ENDTAGS = tuple(b'</' + tag + b'>' for tag in fromindi.TAGS) class _PortHandler: "Creates a connection and sends an receives to the indiserver port" def __init__(self, loop, rconn, indiserver): "Stores the argument values, and creates a collections.deque object" self.loop = loop self.rconn = rconn self.indiserver = indiserver self.to_indi = collections.deque(maxlen=100) # The to_indi dequeue has the right side filled from redis via toindi.SenderLoop # which monitors the traffic published to redis and appends it to this deque # and the left side is sent to indiserver via this objects txtoindi method async def handle_data(self): "coroutine to create the connection and start the sender and receiver" # start by openning a connection reader, writer = await asyncio.open_connection(self.indiserver.host, self.indiserver.port) _message(self.rconn, f"Connected to {self.indiserver.host}:{self.indiserver.port}") await asyncio.gather(self.txtoindi(writer), self.rxfromindi(reader)) async def txtoindi(self, writer): "Monitors to_indi deque and if it has data, pops it and uses writer to send it" while True: if self.to_indi: # Send the next message to the indiserver writer.write(self.to_indi.popleft()) await writer.drain() else: # no message to send, do an async pause 
await asyncio.sleep(0.5) async def rxfromindi(self, reader): # get received data, and put it into message message = b'' messagetagnumber = None while True: # get blocks of data from the indiserver try: data = await reader.readuntil(separator=b'>') except asyncio.LimitOverrunError: data = await reader.read(n=32000) if not message: # data is expected to start with <tag, first strip any newlines data = data.strip() for index, st in enumerate(_STARTTAGS): if data.startswith(st): messagetagnumber = index break else: # data does not start with a recognised tag, so ignore it # and continue waiting for a valid message start continue # set this data into the received message message = data # either further children of this tag are coming, or maybe its a single tag ending in "/>" if message.endswith(b'/>'): # the message is complete, handle message here # Run 'fromindi.receive_from_indiserver' in the default loop's executor: try: root = ET.fromstring(message.decode("utf-8")) except Exception: # possible malformed message = b'' messagetagnumber = None continue result = await self.loop.run_in_executor(None, fromindi.receive_from_indiserver, message, root, self.rconn) # result is None, or the device name if a defxxxx was received # and start again, waiting for a new message message = b'' messagetagnumber = None # and read either the next message, or the children of this tag continue # To reach this point, the message is in progress, with a messagetagnumber set # keep adding the received data to message, until an endtag is reached message += data if message.endswith(_ENDTAGS[messagetagnumber]): # the message is complete, handle message here # Run 'fromindi.receive_from_indiserver' in the default loop's executor: try: root = ET.fromstring(message.decode("utf-8")) except Exception: # possible malformed message = b'' messagetagnumber = None continue result = await self.loop.run_in_executor(None, fromindi.receive_from_indiserver, message, root, self.rconn) # result is None, or the 
device name if a defxxxx was received # and start again, waiting for a new message message = b'' messagetagnumber = None def inditoredis(indiserver, redisserver, log_lengths={}, blob_folder=''): """Blocking call that provides the indiserver - redis conversion :param indiserver: Named Tuple providing the indiserver parameters :type indiserver: namedtuple :param redisserver: Named Tuple providing the redis server parameters :type redisserver: namedtuple :param log_lengths: provides number of logs to store :type log_lengths: dictionary :param blob_folder: Folder where Blobs will be stored :type blob_folder: String """ if not REDIS_AVAILABLE: print("Error - Unable to import the Python redis package") sys.exit(1) print("inditoredis started") # wait two seconds before starting, to give servers # time to start up sleep(2) if blob_folder: blob_folder = pathlib.Path(blob_folder).expanduser().resolve() else: print("Error - a blob_folder must be given") sys.exit(2) # check if the blob_folder exists if not blob_folder.exists(): # if not, create it blob_folder.mkdir(parents=True) if not blob_folder.is_dir(): print("Error - blob_folder already exists and is not a directory") sys.exit(3) # set up the redis server rconn = tools.open_redis(redisserver) # set the fromindi parameters fromindi.setup_redis(redisserver.keyprefix, redisserver.to_indi_channel, redisserver.from_indi_channel, log_lengths, blob_folder) # on startup, clear all redis keys tools.clearredis(rconn, redisserver) # Now create a loop to tx and rx to the indiserver port loop = asyncio.get_event_loop() indiconnection = _PortHandler(loop, rconn, indiserver) # Create a SenderLoop object, with the indiconnection.to_indi dequeue and redis connection senderloop = toindi.SenderLoop(indiconnection.to_indi, rconn, redisserver) # run senderloop - which is blocking, so run in its own thread run_toindi = threading.Thread(target=senderloop) # and start senderloop in its thread, this monitors data published via redis, and appends 
# it to indiconnection.to_indi, where it will be sent on to the indi connection run_toindi.start() while True: indiconnection.to_indi.clear() indiconnection.to_indi.append(b'<getProperties version="1.7" />') try: loop.run_until_complete(indiconnection.handle_data()) except ConnectionRefusedError: _message(rconn, f"Connection refused on {indiserver.host}:{indiserver.port}, re-trying...") sleep(5) except asyncio.IncompleteReadError: _message(rconn, f"Connection failed on {indiserver.host}:{indiserver.port}, re-trying...") sleep(5) else: loop.close() break def _message(rconn, message): "Saves a message to redis, as if a message had been received from indiserver" try: print(message) timestamp = datetime.utcnow().isoformat(timespec='seconds') message_object = fromindi.Message({'message':message, 'timestamp':timestamp}) message_object.write(rconn) message_object.log(rconn, timestamp) except Exception: pass return
test_smtplib.py
import asyncore import base64 import email.mime.text from email.message import EmailMessage from email.base64mime import body_encode as encode_base64 import email.utils import hmac import socket import smtpd import smtplib import io import re import sys import time import select import errno import textwrap import threading import unittest from test import support, mock_socket from test.support import HOST, HOSTv4, HOSTv6 if sys.platform == 'darwin': # select.poll returns a select.POLLHUP at the end of the tests # on darwin, so just ignore it def handle_expt(self): pass smtpd.SMTPChannel.handle_expt = handle_expt def server(evt, buf, serv): serv.listen() evt.set() try: conn, addr = serv.accept() except socket.timeout: pass else: n = 500 while buf and n > 0: r, w, e = select.select([], [conn], []) if w: sent = conn.send(buf) buf = buf[sent:] n -= 1 conn.close() finally: serv.close() evt.set() class GeneralTests(unittest.TestCase): def setUp(self): smtplib.socket = mock_socket self.port = 25 def tearDown(self): smtplib.socket = socket # This method is no longer used but is retained for backward compatibility, # so test to make sure it still works. 
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1',19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def test_debuglevel(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # debuglevel 2 prefixes each trace line with a timestamp
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)


# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    """Thread target: pump the asyncore loop for *serv* until done.

    *serv_evt* is set when the loop exits; *client_evt* being set by the
    client signals that the server may shut down early.
    """
    serv_evt.set()

    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll

        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)

            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break

            n -= 1

    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
            serv.close()
        asyncore.close_all()
        serv_evt.set()

MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'

# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
    """Exercise smtplib against a real smtpd.DebuggingServer in a thread."""

    maxDiff = None

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                          decode_data=True)
        # Keep a note of what server host and port were assigned
        self.host, self.port = self.serv.socket.getsockname()[:2]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM

    def get_output_without_xpeer(self):
        # Strip the X-Peer header the DebuggingServer inserts, whose value
        # varies by platform and is irrelevant to these tests.
        test_output = self.output.getvalue()
        return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
                      test_output, flags=re.MULTILINE|re.DOTALL)

    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.quit()

    def testSourceAddress(self):
        # connect
        src_port = support.find_unused_port()
        try:
            smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
                                timeout=3, source_address=(self.host, src_port))
            self.assertEqual(smtp.source_address, (self.host, src_port))
            self.assertEqual(smtp.local_hostname, 'localhost')
            smtp.quit()
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to source port %d" % src_port)
            raise

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    def testELHO(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'\nSIZE 33554432\nHELP')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testEXPNNotImplemented(self):
        # EXPN isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (502, b'EXPN not implemented')
        smtp.putcmd('EXPN')
        self.assertEqual(smtp.getreply(), expected)
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (252, b'Cannot VRFY user, but will accept message ' + \
                         b'and attempt delivery')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.helo()
        expected = (503, b'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
                                      b'RCPT DATA RSET NOOP QUIT VRFY')
        smtp.quit()

    def testSend(self):
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore.  This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendBinary(self):
        m = b'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNeedingDotQuote(self):
        # Issue 12283
        m = '.A test\n.mes.sage.'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNullSender(self):
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('<>', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: <>$", re.MULTILINE)
        self.assertRegex(debugout, sender)

    def testSendMessage(self):
        m = email.mime.text.MIMEText('A test message')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m, from_addr='John', to_addrs='Sally')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds as figuring out
        # exactly what IP address format is put there is not easy (and
        # irrelevant to our test).  Typically 127.0.0.1 or ::1, but it is
        # not always the same as socket.gethostbyname(HOST). :(
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)

    def testSendMessageWithAddresses(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                                    '<warped@silly.walks.com>')

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        # The Bcc header should not be transmitted.
        del m['Bcc']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                     'warped@silly.walks.com'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSomeAddresses(self):
        # Make sure nothing breaks if not all of the three 'to' headers exist
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSpecifiedAddresses(self):
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m, from_addr='joe@example.com',
                          to_addrs='foo@example.net')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # the message's own To/Cc addresses must NOT have been used
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)

    def testSendMessageWithMultipleFrom(self):
        # Sender overrides To
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'Bernard, Bianca'
        m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$",
                            re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageResent(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m['Bcc']
        del m['Resent-Bcc']
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        # the Resent-* envelope takes precedence over From/To/CC/Bcc
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageMultipleResentRaises(self):
        # two sets of Resent-* headers make the envelope ambiguous
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
        m['Resent-To'] = 'holy@grail.net'
        m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        with self.assertRaises(ValueError):
            smtp.send_message(m)
        smtp.close()


class NonConnectingTests(unittest.TestCase):
    """Tests that never establish a connection at all."""

    def testNotConnected(self):
        # Test various operations on an unconnected SMTP object that
        # should raise exceptions (at present the attempt in SMTP.send
        # to reference the nonexistent 'sock' attribute of the SMTP object
        # causes an AttributeError)
        smtp = smtplib.SMTP()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
        self.assertRaises(smtplib.SMTPServerDisconnected,
                          smtp.send, 'test msg')

    def testNonnumericPort(self):
        # check that non-numeric port raises OSError
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost", "bogus")
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost:bogus")


# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):

    def setUp(self):
        smtplib.socket = mock_socket
        # a 199 greeting is not a valid 2xx success code
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
                            HOST, self.port, 'localhost', 3)


class TooLongLineTests(unittest.TestCase):
    # a reply longer than smtplib._MAXLINE must be rejected, not buffered
    respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'

    def setUp(self):
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = support.bind_port(self.sock)
        servargs = (self.evt, self.respdata, self.sock)
        thread = threading.Thread(target=server, args=servargs)
        thread.start()
        self.addCleanup(thread.join)
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        self.evt.wait()
        sys.stdout = self.old_stdout

    def testLineTooLong(self):
        self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)


# Fixture data shared by the simulated-server tests below.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }

sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }

# Simulated SMTP channel & server
class ResponseException(Exception): pass

class SimSMTPChannel(smtpd.SMTPChannel):
    """An SMTPChannel whose responses the tests can script per-command."""

    quit_response = None
    mail_response = None
    rcpt_response = None
    data_response = None
    rcpt_count = 0
    rset_count = 0
    disconnect = 0
    AUTH = 99    # Add protocol state to enable auth testing.
    authenticated_user = None

    def __init__(self, extra_features, *args, **kw):
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    # AUTH related stuff.
    # It would be nice if support for this were in smtpd.
    def found_terminator(self):
        # While in the extra AUTH state, lines are credential continuations
        # rather than SMTP commands, so route them to the auth object.
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
                return
        super().found_terminator()

    def smtp_AUTH(self, arg):
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        # dispatch to _auth_<mechanism>, e.g. AUTH CRAM-MD5 -> _auth_cram_md5
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        self.auth_object(args[1] if len(args) == 2 else None)

    def _authenticated(self, user, valid):
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND

    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')

    def _auth_plain(self, arg=None):
        if arg is None:
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])

    def _auth_login(self, arg=None):
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user,
                                password == sim_auth[1])
            del self._auth_login_user

    def _auth_cram_md5(self, arg=None):
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          'failed: {}'.format(logpass, e))
                return False
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.

    def smtp_EHLO(self, arg):
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # surface server-side errors instead of asyncore swallowing them
        raise


class SimSMTPServer(smtpd.SMTPServer):
    """SMTPServer wired to SimSMTPChannel, recording envelope addresses."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        self._extra_features = []
        self._addresses = {}
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data)

    def process_message(self, peer, mailfrom, rcpttos, data):
        self._addresses['from'] = mailfrom
        self._addresses['tos'] = rcpttos

    def add_feature(self, feature):
        self._extra_features.append(feature)

    def handle_error(self):
        raise


# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testBasic(self):
        # smoke test
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.quit()

    def testEHLO(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})

        # features expected from the test server
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }

        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        for addr_spec, name in sim_users.items():
            expected_known = (250, bytes('%s %s' %
                                         (name, smtplib.quoteaddr(addr_spec)),
                                         "ascii"))
            self.assertEqual(smtp.vrfy(addr_spec), expected_known)

        u = 'nobody@nowhere.com'
        expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, bytes('\n'.join(users), "ascii"))
            self.assertEqual(smtp.expn(listname), expected_known)

        u = 'PSU-Members-List'
        expected_unknown = (550, b'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_multiple(self):
        # Test that multiple authentication methods are tried.
        self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def test_auth_function(self):
        supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
        for mechanism in supported:
            self.serv.add_feature("AUTH {}".format(mechanism))
        for mechanism in supported:
            with self.subTest(mechanism=mechanism):
                smtp = smtplib.SMTP(HOST, self.port,
                                    local_hostname='localhost', timeout=15)
                smtp.ehlo('foo')
                smtp.user, smtp.password = sim_auth[0], sim_auth[1]
                method = 'auth_' + mechanism.lower().replace('-', '_')
                resp = smtp.auth(mechanism, getattr(smtp, method))
                self.assertEqual(resp, (235, b'Authentication Succeeded'))
                smtp.close()

    def test_quit_resets_greeting(self):
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost',
                            timeout=15)
        code, message = smtp.ehlo()
        self.assertEqual(code, 250)
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.connect(HOST, self.port)
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.ehlo_or_helo_if_needed()
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()

    def test_with_statement(self):
        # the context manager must QUIT (first block) or tolerate an
        # already-closed connection (second block)
        with smtplib.SMTP(HOST, self.port) as smtp:
            code, message = smtp.noop()
            self.assertEqual(code, 250)
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
        with smtplib.SMTP(HOST, self.port) as smtp:
            smtp.close()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')

    def test_with_statement_QUIT_failure(self):
        with self.assertRaises(smtplib.SMTPResponseException) as error:
            with smtplib.SMTP(HOST, self.port) as smtp:
                smtp.noop()
                self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
        self.assertEqual(error.exception.smtp_code, 421)
        self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.
    # Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rest_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
        self.serv._SMTPchannel.disconnect = True
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)

    # Issue 5713: make sure close, not rset, is called if we get a 421 error
    def test_421_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '421 closing connection'
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)

    def test_421_from_rcpt_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
        with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
            smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
        self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})

    def test_421_from_data_cmd(self):
        class MySimSMTPChannel(SimSMTPChannel):
            def found_terminator(self):
                if self.smtp_state == self.DATA:
                    self.push('421 closing')
                else:
                    super().found_terminator()
        self.serv.channel_class = MySimSMTPChannel
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)

    def test_smtputf8_NotSupportedError_if_no_server_support(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertFalse(smtp.has_extn('smtputf8'))
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.sendmail,
            'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])

    def test_send_unicode_without_SMTPUTF8(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
        self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')

    def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
        # This test is located here and not in the SMTPUTF8SimTests
        # class because it needs a "regular" SMTP server to work
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        with self.assertRaises(smtplib.SMTPNotSupportedError):
            smtp.send_message(msg)

    def test_name_field_not_included_in_envelop_addresses(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3
        )
        self.addCleanup(smtp.close)

        message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))

        self.assertDictEqual(smtp.send_message(message), {})

        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])


class SimSMTPUTF8Server(SimSMTPServer):

    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME'] smtpd.SMTPServer.__init__(self, *args, **kw) def handle_accepted(self, conn, addr): self._SMTPchannel = self.channel_class( self._extra_features, self, conn, addr, decode_data=self._decode_data, enable_SMTPUTF8=self.enable_SMTPUTF8, ) def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None): self.last_peer = peer self.last_mailfrom = mailfrom self.last_rcpttos = rcpttos self.last_message = data self.last_mail_options = mail_options self.last_rcpt_options = rcpt_options class SMTPUTF8SimTests(unittest.TestCase): maxDiff = None def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn self.serv_evt = threading.Event() self.client_evt = threading.Event() # Pick a random unused port by passing 0 for the port number self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1), decode_data=False, enable_SMTPUTF8=True) # Keep a note of what port was assigned self.port = self.serv.socket.getsockname()[1] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() def test_test_server_supports_extensions(self): smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.ehlo() self.assertTrue(smtp.does_esmtp) self.assertTrue(smtp.has_extn('smtputf8')) def test_send_unicode_with_SMTPUTF8_via_sendmail(self): m = '¡a test message containing unicode!'.encode('utf-8') smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.sendmail('Jőhn', 'Sálly', m, mail_options=['BODY=8BITMIME', 
'SMTPUTF8']) self.assertEqual(self.serv.last_mailfrom, 'Jőhn') self.assertEqual(self.serv.last_rcpttos, ['Sálly']) self.assertEqual(self.serv.last_message, m) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) def test_send_unicode_with_SMTPUTF8_via_low_level_API(self): m = '¡a test message containing unicode!'.encode('utf-8') smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.ehlo() self.assertEqual( smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']), (250, b'OK')) self.assertEqual(smtp.rcpt('János'), (250, b'OK')) self.assertEqual(smtp.data(m), (250, b'OK')) self.assertEqual(self.serv.last_mailfrom, 'Jő') self.assertEqual(self.serv.last_rcpttos, ['János']) self.assertEqual(self.serv.last_message, m) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) def test_send_message_uses_smtputf8_if_addrs_non_ascii(self): msg = EmailMessage() msg['From'] = "Páolo <főo@bar.com>" msg['To'] = 'Dinsdale' msg['Subject'] = 'Nudge nudge, wink, wink \u1F609' # XXX I don't know why I need two \n's here, but this is an existing # bug (if it is one) and not a problem with the new functionality. msg.set_content("oh là là, know what I mean, know what I mean?\n\n") # XXX smtpd converts received /r/n to /n, so we can't easily test that # we are successfully sending /r/n :(. expected = textwrap.dedent("""\ From: Páolo <főo@bar.com> To: Dinsdale Subject: Nudge nudge, wink, wink \u1F609 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit MIME-Version: 1.0 oh là là, know what I mean, know what I mean? 
""") smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) self.assertEqual(smtp.send_message(msg), {}) self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com') self.assertEqual(self.serv.last_rcpttos, ['Dinsdale']) self.assertEqual(self.serv.last_message.decode(), expected) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='') class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel): def smtp_AUTH(self, arg): # RFC 4954's AUTH command allows for an optional initial-response. # Not all AUTH methods support this; some require a challenge. AUTH # PLAIN does those, so test that here. See issue #15014. args = arg.split() if args[0].lower() == 'plain': if len(args) == 2: # AUTH PLAIN <initial-response> with the response base 64 # encoded. Hard code the expected response for the test. 
if args[1] == EXPECTED_RESPONSE: self.push('235 Ok') return self.push('571 Bad authentication') class SimSMTPAUTHInitialResponseServer(SimSMTPServer): channel_class = SimSMTPAUTHInitialResponseChannel class SMTPAUTHInitialResponseSimTests(unittest.TestCase): def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn self.serv_evt = threading.Event() self.client_evt = threading.Event() # Pick a random unused port by passing 0 for the port number self.serv = SimSMTPAUTHInitialResponseServer( (HOST, 0), ('nowhere', -1), decode_data=True) # Keep a note of what port was assigned self.port = self.serv.socket.getsockname()[1] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() def testAUTH_PLAIN_initial_response_login(self): self.serv.add_feature('AUTH PLAIN') smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.login('psu', 'doesnotexist') smtp.close() def testAUTH_PLAIN_initial_response_auth(self): self.serv.add_feature('AUTH PLAIN') smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.user = 'psu' smtp.password = 'doesnotexist' code, response = smtp.auth('plain', smtp.auth_plain) smtp.close() self.assertEqual(code, 235) if __name__ == '__main__': unittest.main()
mossbackScaner.py
# -*- coding: utf-8 -*-
# version: 0.5
# date: 2020.09.22
"""Mossback scanner: receives captured HTTP request packets over TCP from a
mitm client and fans each packet out to the scanner plugins listed in
config.json."""
import json
import socket
import threading
import time
from subprocess import PIPE

from color_print import *
from utils import format_save

# Shared state between the listener (main) and the scan worker thread.
glo_pkg_list = []               # FIFO of raw JSON-encoded request packets
glo_lock = threading.Lock()     # guards glo_pkg_list
glo_scanner = []                # instantiated plugin objects

with open('config.json', 'r') as fp:
    glo_conf = json.loads(fp.read())

# SECURITY: plugin names taken from config.json are fed to exec()/eval(),
# so a hostile config file can execute arbitrary code. Only load trusted
# configuration; consider importlib.import_module + getattr instead.
for plugin in glo_conf['plugins']:
    exec("from " + plugin + " import " + plugin)
    glo_scanner.append(eval(plugin + "()"))


def do_scan_thread():
    """Worker loop: pop queued packets and run every scanner plugin on them.

    Packets are JSON objects with 'method', 'uri', 'version', 'header' and
    'body' keys. Sleeps 1s when the queue is empty.
    """
    req2file = format_save('')
    while True:
        glo_lock.acquire()
        if len(glo_pkg_list) > 0:
            pkg = json.loads(glo_pkg_list.pop(0))
            # Release before the (potentially slow) plugin runs so the
            # listener can keep queueing packets.
            glo_lock.release()
            if glo_conf['server_host'] != '*':
                req2file.save_request(pkg['method'], pkg['uri'], pkg['version'],
                                      pkg['header'], pkg['body'])
            # do all test here
            for fun in glo_scanner:
                fun.run(pkg['method'], pkg['uri'], pkg['version'],
                        pkg['header'], pkg['body'])
            # test finished
        else:
            glo_lock.release()
            time.sleep(1)


def main():
    """Accept one mitm client at a time and queue everything it sends."""
    t = threading.Thread(target=do_scan_thread, args=())
    t.start()

    MAXBUFSIZ = 1048576
    ADDRESS = ('', glo_conf['scanner_port'])
    tcpServerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpServerSocket.bind(ADDRESS)
    tcpServerSocket.listen()
    mitmCli = None
    while True:
        while mitmCli is None:
            try:
                mitmCli, _ = tcpServerSocket.accept()
            except OSError:
                time.sleep(0.1)
        try:
            pack_data = mitmCli.recv(MAXBUFSIZ)
        except OSError:
            # BUGFIX: previously the (stale or even undefined) pack_data was
            # still appended after a failed recv; drop the client and wait
            # for a new connection instead.
            mitmCli = None
            continue
        if not pack_data:
            # recv() returning b'' means the peer closed the connection;
            # previously this looped forever appending empty packets.
            mitmCli = None
            continue
        glo_lock.acquire()
        glo_pkg_list.append(pack_data)
        glo_lock.release()


if __name__ == "__main__":
    main()
executor.py
# -*- coding: utf-8 -*-
#
# Robonomics liability execution node.
#
# Subscribes for incoming liability contracts, prepares one
# LiabilityExecutionThread per accepted liability and exposes ROS services
# to start/restart/resume/finish them.

from robonomics_liability.msg import Liability
from robonomics_liability.srv import FinishLiability, FinishLiabilityResponse, StartLiability, StartLiabilityResponse, PersistenceLiabilityTimestamp
from robonomics_msgs.msg import Result

from urllib.parse import urlparse
from threading import Thread
from .LiabilityExecutionThread import LiabilityExecutionThread
from queue import Queue
import rospy
import ipfsapi
import os

from ethereum_common import eth_keyfile_helper


class Executor:
    # NOTE(review): class-level (shared) attributes; the node appears to
    # create a single Executor instance, so sharing is presumably intended —
    # confirm before instantiating more than one.
    liability_queue = Queue()    # liabilities accepted but not yet prepared
    liability_finish = False

    def __init__(self):
        '''
            Robonomics liability node initialisation.
        '''
        rospy.init_node('robonomics_liability_executor')

        # Topic names to record during execution: comma-separated ROS param,
        # blanks stripped and empty entries dropped.
        self.recording_topics = list(filter(None, [x.strip() for x in rospy.get_param('~recording_topics').split(",")]))
        self.master_check_interval = rospy.get_param('~master_check_interval')

        __keyfile = rospy.get_param('~keyfile')
        __keyfile_password_file = rospy.get_param('~keyfile_password_file')

        # Local Ethereum account used to decide which liabilities are "ours".
        __keyfile_helper = eth_keyfile_helper.KeyfileHelper(__keyfile, keyfile_password_file=__keyfile_password_file)
        self.__account = __keyfile_helper.get_local_account_from_keyfile()

        # "~ipfs_http_provider" looks like a URL; netloc is "host:port".
        ipfs_provider = urlparse(rospy.get_param('~ipfs_http_provider')).netloc.split(':')
        self.ipfs_client = ipfsapi.connect(ipfs_provider[0], int(ipfs_provider[1]))

        # Execution threads, keyed by liability address string
        # (see _createLiabilityExceutionThread).
        self.liability_execution_threads = {}

        # persistence publishers
        self.persistence_add = rospy.Publisher('persistence/add', Liability, queue_size=10)
        self.persistence_del = rospy.Publisher('persistence/del', Liability, queue_size=10)

        # persistence services
        self.persistence_get_liability_timestamp = rospy.ServiceProxy('persistence/get_liability_timestamp', PersistenceLiabilityTimestamp)

        self.executions_work_directory = os.path.join(os.getcwd(), 'liabilities_executions')

        def incoming_liability(msg):
            # Accept only liabilities addressed to our account; persist and
            # queue them for preparation by _liability_worker.
            if msg.promisor.address != self.__account.address:
                rospy.logwarn('Liability %s is not for me, SKIP.', msg.address)
            else:
                rospy.loginfo('Append %s to liability queue.', msg.address)
                self.persistence_add.publish(msg)
                self.liability_queue.put(msg)
        rospy.Subscriber('incoming', Liability, incoming_liability)

        def finish_liability(msg):
            # NOTE(review): threads are stored under liability.address.address
            # while this pops by msg.address — presumably the same string
            # value; confirm against the service definition.
            liability_thread = self.liability_execution_threads.pop(msg.address)
            liability_msg = liability_thread.getLiabilityMsg()
            result = liability_thread.finish(msg.success)
            self.persistence_del.publish(liability_msg)
            self.complete.publish(liability_msg)
            self.result_topic.publish(result)
            rospy.loginfo('Liability %s finished with %s', liability_msg.address.address, result.result)
            return FinishLiabilityResponse()
        rospy.Service('finish', FinishLiability, finish_liability)

        def start_liability(msg):
            # Start a previously prepared (ready) execution thread.
            try:
                liability_thread = self.liability_execution_threads[msg.address]
            except KeyError as e:
                rospy.logerr("Could not find liability %s for starting", msg.address)
                return StartLiabilityResponse(False, "Could not find liability {0} for starting".format(msg.address))

            try:
                liability_thread.start()
                rospy.loginfo('Liability %s started', liability_thread.getLiabilityMsg().address)
            except Exception as e:
                rospy.logerr("Can't start liability %s with %s", msg.address, e)
                return StartLiabilityResponse(False, "Can't start liability {0} with exception: {1}".format(msg.address, e))
            return StartLiabilityResponse(True, "Liability {0} started".format(liability_thread.getLiabilityMsg().address.address))
        rospy.Service('start', StartLiability, start_liability)

        def restart_liability(msg):
            # Interrupt a running execution (discarding its result), recreate
            # its thread and delegate to start_liability.
            try:
                liability_thread = self.liability_execution_threads.pop(msg.address)
            except KeyError as e:
                rospy.logerr("Could not find liability %s for restarting", msg.address)
                return StartLiabilityResponse(False, "Could not find liability {0} for restarting".format(msg.address))

            liability = liability_thread.getLiabilityMsg()
            try:
                liability_thread.interrupt(delete_result=True)
                rospy.loginfo('Liability %s interrupted', liability.address)
            except Exception as e:
                rospy.logerr("Can't interrupt liability %s with %s", msg.address, e)
                return StartLiabilityResponse(False, "Can't interrupt liability {0} with exception: {1}".format(msg.address, e))

            try:
                self._createLiabilityExceutionThread(liability)
            except Exception as e:
                return StartLiabilityResponse(False, "Can't initialize liability {0} execution thread with exception: {1}".format(msg.address, e))
            return start_liability(msg)
        rospy.Service('restart', StartLiability, restart_liability)

        def resume_liability(msg):
            # Resume execution from the timestamp stored by the persistence
            # node (e.g. after a node restart).
            try:
                liability_thread = self.liability_execution_threads[msg.address]
            except KeyError as e:
                rospy.logerr("Could not find liability %s for resuming", msg.address)
                return StartLiabilityResponse(False, "Could not find liability {0} for resuming".format(msg.address))

            try:
                rospy.wait_for_service(self.persistence_get_liability_timestamp.resolved_name)
                timestamp = self.persistence_get_liability_timestamp(liability_thread.getLiabilityMsg().address)
                rospy.logwarn("Getting %s timestamp for liability %s", timestamp.timestamp, msg.address)
                liability_thread.start(timestamp.timestamp)
                rospy.loginfo('Liability %s resumed', liability_thread.getLiabilityMsg().address)
            except Exception as e:
                rospy.logerr("Can't resume liability %s with %s", msg.address, e)
                return StartLiabilityResponse(False, "Can't resume liability {0} with exception: {1}".format(msg.address, e))
            return StartLiabilityResponse(True, "Liability {0} resumed".format(liability_thread.getLiabilityMsg().address.address))
        rospy.Service('resume', StartLiability, resume_liability)

        self.complete = rospy.Publisher('complete', Liability, queue_size=10)
        self.ready = rospy.Publisher('ready', Liability, queue_size=10)
        self.result_topic = rospy.Publisher('result', Result, queue_size=10)

    def _createLiabilityExceutionThread(self, liability):
        # Prepare (but do not start) an execution thread with its own work
        # directory named after the liability address.
        liability_work_directory = os.path.join(self.executions_work_directory, liability.address.address)
        os.makedirs(liability_work_directory, exist_ok=True)
        rospy.loginfo('Use directory %s for liability %s executor thread', liability_work_directory, liability.address.address)
        thread = LiabilityExecutionThread(liability_work_directory,
                                          self.ipfs_client,
                                          self.master_check_interval,
                                          self.recording_topics,
                                          liability)
        self.liability_execution_threads[liability.address.address] = thread

    def _liability_worker(self):
        # Background loop: turn queued liabilities into prepared execution
        # threads and announce readiness on the 'ready' topic.
        while not rospy.is_shutdown():
            msg = self.liability_queue.get()
            rospy.loginfo('Prepare to start liability %s', msg.address.address)
            try:
                self._createLiabilityExceutionThread(msg)
                self.ready.publish(msg)
            except Exception as e:
                rospy.logerr("Failed to prepare liability execution thread for %s with exception \"%s\"", msg.address, e)

    def spin(self):
        '''
            Waiting for the new messages.
        '''
        Thread(target=self._liability_worker, daemon=True).start()
        rospy.spin()
archive.py
"""Readers and writers for the dataset archive formats used by the project:
zstd-compressed jsonl chunks, .dat.zst length-prefixed blobs, plain json,
txt/zip/tar variants and the OpenWebText layouts."""
import os
import zstandard
import orjson as json
import time
import tarfile
import codecs
from functools import reduce
import jsonlines
import io
from zipfile import ZipFile
import gzip
from math import ceil
import mmap
import multiprocessing as mp
from pathlib import Path

# File name endings Reader.stream_data knows how to decode.
VALID_EXTENSIONS = [
    "openwebtext.tar.xz",
    "_data.xz",
    ".dat.zst",
    ".jsonl",
    ".jsonl.zst",
    ".jsonl.zst.tar",
    ".json.zst",
    ".txt",
    ".zip",
    ".tar.gz",
    ".json.gz",
    ".gz",
]


def has_valid_extension(file):
    """Return True when `file` ends with a supported extension."""
    return any(file.endswith(ext) for ext in VALID_EXTENSIONS)


def _listdir_or_file(x):
    """Expand `x` (file path, directory, or list thereof) into a sorted flat
    list of file paths. Raises FileNotFoundError for missing paths."""
    if isinstance(x, list):
        return reduce(lambda acc, more: acc + more, map(listdir_or_file, sorted(x)))
    if os.path.isfile(x):
        return [x]
    elif os.path.isdir(x):
        return [str(Path(x) / fn) for fn in sorted(os.listdir(x))]
    else:
        raise FileNotFoundError(f"{x} not found")


def listdir_or_file(x):
    """Like _listdir_or_file but keeping only files with supported extensions."""
    return list(filter(has_valid_extension, _listdir_or_file(x)))


def tarfile_reader(file, streaming=False):
    """Yield the contents of each regular file in an (uncompressed) tar
    stream, as bytes (or as mmap views when streaming=True).

    We need our own tarfile parser because `tarfile` doesn't work well for
    big tarfiles; it seems to be reading the entire file to get a list of
    where all the files are — but we don't need that because we just need
    to see each file once. `tarfile` only offers 1. load the entire tarfile
    and query by filename or 2. extract to disk — neither is what we want.
    """
    offset = 0
    paxfilesize = None
    while True:
        hdr = file.read(512)
        offset += 512

        # https://www.gnu.org/software/tar/manual/html_node/Standard.html
        # size field is hdr[124:136]; compare [124:135] because the last
        # byte is the \0 terminator. An all-zero size marks end of archive.
        if hdr[124:135] == b"\0" * 11:
            # end of record
            break

        fname = hdr[:100].split(b"\0")[0]  # header name field (unused)

        # If a file is too big for the octal size field, tar emits a
        # PaxHeader entry carrying the real size for the *next* entry.
        if paxfilesize is not None:
            size = paxfilesize
            paxfilesize = None
        else:
            size = int(hdr[124:135], 8)
        padded_size = ceil(size / 512) * 512

        # PaxHeader ("x") entries and non-regular entries (directories etc.)
        # https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
        entry_type = chr(hdr[156])  # renamed from `type` (builtin shadow)
        if entry_type == "x":
            meta = file.read(padded_size)[:size]

            def kv(line):
                # pax records look like "<len> <key>=<value>"
                return line.decode("utf-8").split(" ")[1].split("=")

            paxfileattrs = {kv(line)[0]: kv(line)[1] for line in meta.split(b"\n") if line}
            paxfilesize = int(paxfileattrs["size"])

            offset += padded_size
            continue
        elif entry_type != "0" and entry_type != "\0":
            # skip anything that is not a plain file
            if streaming:
                file.seek(padded_size, os.SEEK_CUR)
            else:
                file.read(padded_size)
            offset += padded_size
            continue

        if streaming:
            # skip directory entries
            if size != 0:
                # zero-copy view over the file region instead of reading it
                mmo = mmap.mmap(
                    file.fileno(), length=offset + size, access=mmap.ACCESS_READ
                )
                mmo.seek(offset)
                yield mmo

            file.seek(padded_size, os.SEEK_CUR)
        else:
            yield file.read(padded_size)[:size]
        offset += padded_size


def handle_jsonl(jsonl_reader, get_meta, autojoin_paragraphs, para_joiner, key="text"):
    """Yield documents from a jsonlines reader.

    Each record may be a bare string (legacy format, no meta allowed) or an
    object holding the document under `key` (list-valued documents are
    joined with `para_joiner` when autojoin_paragraphs). With get_meta,
    yields (text, meta_dict) pairs instead of plain text.
    """
    for ob in jsonl_reader:
        # naive jsonl where each object is just the string itself, with no
        # meta. For legacy compatibility.
        if isinstance(ob, str):
            assert not get_meta
            yield ob
            continue

        text = ob[key]

        if autojoin_paragraphs and isinstance(text, list):
            text = para_joiner.join(text)

        if get_meta:
            yield text, (ob["meta"] if "meta" in ob else {})
        else:
            yield text


class Reader:
    """Streams documents out of any supported archive path (see
    VALID_EXTENSIONS); `in_path` may be a file, directory or list."""

    def __init__(self, in_path):
        self.in_path = in_path

    def stream_data(self, get_meta=False, threaded=False):
        """Yield documents; with threaded=True, decode in a child process
        feeding a bounded queue."""
        if not threaded:
            yield from self._stream_data(get_meta)
            return

        q = mp.Queue(1000)
        p = mp.Process(target=self._stream_data_threaded, args=(q, get_meta))
        p.start()
        while p.is_alive():
            res = q.get()
            if res is None:
                break
            yield res

    def _stream_data_threaded(self, q, get_meta=False):
        # Child-process body: stream into the queue, then signal completion.
        for data in self._stream_data(get_meta):
            q.put(data)
        q.put(None)

    def _stream_data(self, get_meta=False, jsonl_key="text"):
        """Dispatch each input file to the matching read_* method by suffix."""
        self.f_name = ""
        files = listdir_or_file(self.in_path)
        if not files:
            raise FileNotFoundError(f"No valid file(s) found in {self.in_path}")
        for f in files:
            self.f_name = f
            if f == "openwebtext.tar.xz":
                assert not get_meta
                yield from self.read_owt(f)
            elif "urlsf_subset" in f and f.endswith("_data.xz"):
                assert not get_meta
                yield from self.read_owt_subset(f)
            elif f.endswith(".dat.zst"):
                assert not get_meta
                yield from self.read_dat(f)
            elif f.endswith(".jsonl"):
                yield from self.read_jsonl(f, get_meta, key=jsonl_key)
            elif f.endswith(".jsonl.zst"):
                yield from self.read_jsonl_zst(f, get_meta, key=jsonl_key)
            elif f.endswith(".jsonl.zst.tar"):
                yield from self.read_jsonl_tar(f, get_meta, key=jsonl_key)
            elif f.endswith(".json.zst"):
                assert not get_meta
                yield from self.read_json(f)
            elif f.endswith(".txt"):
                assert not get_meta
                yield from self.read_txt(f)
            elif f.endswith(".zip"):
                assert not get_meta
                yield from self.read_zip(f)
            elif f.endswith(".tar.gz"):
                assert not get_meta
                yield from self.read_tgz(f)
            elif f.endswith(".json.gz"):
                assert not get_meta
                yield from self.read_jsongz(f)
            elif f.endswith(".gz"):
                assert not get_meta
                yield from self.read_gz(f)
            else:
                # shouldn't be reached
                print(f"Skipping {f} as streaming for that filetype is not implemented")

    def read_txt(self, file):
        # One document: the whole file.
        with open(file, "r") as fh:
            yield fh.read()

    def read_zip(self, file):
        # One document per zip member.
        archive = ZipFile(file, "r")
        for f in archive.namelist():
            yield archive.read(f).decode("UTF-8")

    def read_tgz(self, file):
        # One document per tar member, decoded as UTF-8.
        gz = gzip.open(file)
        yield from (x.decode("utf-8") for x in tarfile_reader(gz, streaming=False))

    def read_gz(self, file):
        # One document per line.
        with gzip.open(file, "rb") as f:
            for line in f:
                yield line.decode("utf-8")

    def read_jsongz(self, file):
        # gzipped jsonl: one JSON value per line.
        for line in self.read_gz(file):
            yield json.loads(line)

    def read_json(self, file):
        # zstd-compressed JSON array; yields its elements.
        with open(file, "rb") as fh:
            cctx = zstandard.ZstdDecompressor()
            reader = cctx.stream_reader(fh)
            # BUGFIX: orjson exposes no `load`; decompress fully and use
            # `loads` on the resulting bytes.
            ob = json.loads(reader.read())
            yield from ob

    def read_dat(self, file):
        # Length-prefixed format: 16-digit decimal length, then that many
        # bytes of UTF-8 text, repeated until EOF.
        with open(file, "rb") as fh:
            cctx = zstandard.ZstdDecompressor()
            reader = cctx.stream_reader(fh)
            while True:
                ln = reader.read(16).decode("UTF-8")
                if not ln:
                    break
                ln = int(ln)
                yield reader.read(ln).decode("UTF-8")

    def read_jsonl(
        self,
        file,
        get_meta=False,
        autojoin_paragraphs=True,
        para_joiner="\n\n",
        key="text",
    ):
        """Stream documents from a plain .jsonl file."""
        with jsonlines.open(file) as rdr:
            yield from handle_jsonl(
                rdr, get_meta, autojoin_paragraphs, para_joiner, key
            )

    def read_jsonl_zst(
        self,
        file,
        get_meta=False,
        autojoin_paragraphs=True,
        para_joiner="\n\n",
        key="text",
    ):
        """Stream documents from a zstd-compressed .jsonl.zst file."""
        with open(file, "rb") as fh:
            cctx = zstandard.ZstdDecompressor()
            reader = io.BufferedReader(cctx.stream_reader(fh))
            rdr = jsonlines.Reader(reader)
            yield from handle_jsonl(
                rdr, get_meta, autojoin_paragraphs, para_joiner, key
            )

    def read_jsonl_tar(
        self,
        file,
        get_meta=False,
        autojoin_paragraphs=True,
        para_joiner="\n\n",
        key="text",
    ):
        """Stream documents from a tar of .jsonl.zst members (zero-copy via
        mmap views from tarfile_reader)."""
        with open(file, "rb") as fh:
            for f in tarfile_reader(fh, streaming=True):
                cctx = zstandard.ZstdDecompressor()
                reader = io.BufferedReader(cctx.stream_reader(f))
                rdr = jsonlines.Reader(reader)
                yield from handle_jsonl(
                    rdr, get_meta, autojoin_paragraphs, para_joiner, key
                )
                f.close()

    def read_owt(self, file):
        """Stream documents from the nested OpenWebText tar-of-tars layout."""
        tar = tarfile.open(file, encoding="utf-8")
        utf8reader = codecs.getreader("utf-8")

        for name in tar.getmembers():
            fp = tar.extractfile(name)
            inner_tar = tarfile.open(fileobj=fp, encoding="utf-8")
            for inner_name in inner_tar.getmembers():
                inner_fp = utf8reader(inner_tar.extractfile(inner_name))
                contents = inner_fp.read()
                yield contents

    def read_owt_subset(self, file):
        """Stream documents from a single OpenWebText urlsf_subset archive."""
        utf8reader = codecs.getreader("utf-8")
        tar = tarfile.open(file, encoding="utf-8")
        for name in tar.getmembers():
            fp = utf8reader(tar.extractfile(name))
            contents = fp.read()
            yield contents


def _as_json_bytes(obj):
    """Serialize obj with the module's json library, returning bytes.

    BUGFIX: orjson.dumps already returns bytes, so the previous
    `json.dumps(...).encode("UTF-8")` raised AttributeError; encoding is
    applied only when a str-returning json library is in use.
    """
    payload = json.dumps(obj)
    if isinstance(payload, str):
        payload = payload.encode("UTF-8")
    return payload


class Archive:
    """Incremental writer of zstd-compressed jsonl chunks; add_data appends
    to an in-progress chunk, commit finalizes and names it."""

    def __init__(self, out_dir, compression_level=3):
        self.out_dir = out_dir
        os.makedirs(out_dir, exist_ok=True)
        self.i = 0

        self.fh = open(self.out_dir + "/current_chunk_incomplete", "wb")
        self.cctx = zstandard.ZstdCompressor(level=compression_level, threads=8)
        self.compressor = self.cctx.stream_writer(self.fh)

    def add_data(self, data, meta=None):
        """Append one {"text": ..., "meta": ...} line to the open chunk.

        `meta=None` replaces the previous mutable `{}` default (shared-state
        pitfall); passing no meta still serializes an empty object.
        """
        record = {"text": data, "meta": {} if meta is None else meta}
        self.compressor.write(_as_json_bytes(record) + b"\n")

    def commit(self, archive_name="default"):
        """Flush and rename the in-progress chunk, then start a fresh one."""
        fname = (
            self.out_dir
            + "/data_"
            + str(self.i)
            + "_time"
            + str(int(time.time()))
            + "_"
            + archive_name
            + ".jsonl.zst"
        )
        self.compressor.flush(zstandard.FLUSH_FRAME)

        self.fh.flush()
        self.fh.close()
        os.rename(self.out_dir + "/current_chunk_incomplete", fname)
        self.fh = open(self.out_dir + "/current_chunk_incomplete", "wb")
        self.compressor = self.cctx.stream_writer(self.fh)

        self.i += 1


class DatArchive:
    """Writer for the length-prefixed .dat.zst format (see Reader.read_dat)."""

    def __init__(self, out_dir):
        self.out_dir = out_dir
        os.makedirs(out_dir, exist_ok=True)
        self.data = []
        self.i = 0
        # Continue numbering after any chunks already present on disk.
        if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
            self.i = (
                max(
                    map(
                        lambda x: int(x.split("_")[1].split(".")[0]),
                        os.listdir(out_dir),
                    )
                )
                + 1
            )

    def add_data(self, data):
        self.data.append(data)

    def commit(self, archive_name=None):
        # TODO: streaming
        cctx = zstandard.ZstdCompressor(level=3)

        if archive_name is None:
            archive_name = str(int(time.time()))

        # Each document becomes "<16-digit length><utf-8 bytes>".
        res = b"".join(
            map(
                lambda x: ("%016d" % len(x)).encode("UTF-8") + x,
                map(lambda x: x.encode("UTF-8"), self.data),
            )
        )
        cdata = cctx.compress(res)

        with open(
            self.out_dir + "/data_" + str(self.i) + "_" + archive_name + ".dat.zst",
            "wb",
        ) as fh:
            fh.write(cdata)

        self.i += 1
        self.data = []


class JSONArchive:
    """Writer that buffers documents and commits them as one zstd-compressed
    JSON array (see Reader.read_json)."""

    def __init__(self, out_dir):
        self.out_dir = out_dir
        os.makedirs(out_dir, exist_ok=True)
        self.data = []
        self.i = 0
        # Continue numbering after any chunks already present on disk.
        if os.path.exists(out_dir) and len(os.listdir(out_dir)) > 0:
            self.i = (
                max(
                    map(
                        lambda x: int(x.split("_")[1].split(".")[0]),
                        os.listdir(out_dir),
                    )
                )
                + 1
            )

    def add_data(self, data):
        self.data.append(data)

    def commit(self):
        cctx = zstandard.ZstdCompressor(level=3)
        # BUGFIX: avoid .encode() on orjson's bytes output (see _as_json_bytes).
        cdata = cctx.compress(_as_json_bytes(self.data))
        with open(
            self.out_dir
            + "/data_"
            + str(self.i)
            + "_"
            + str(int(time.time()))
            + ".json.zst",
            "wb",
        ) as fh:
            fh.write(cdata)
        self.i += 1
        self.data = []
streaming_utils.py
import threading
import time
import os

# Read the Docs builds have no OpenCV; skip the import so docs can build.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
    import cv2


class VideoCaptureWithoutBuffer():
    """Wrapper around cv2.VideoCapture that drains frames on a background
    thread so read() always returns the most recent frame instead of a
    buffered (stale) one."""

    def __init__(self, cap):
        self.cap = cap
        self.frame = None        # newest frame seen by the reader thread
        self.streaming = True
        t = threading.Thread(target=self._reader, args=[])
        t.start()

    def _reader(self):
        """Continuously grab frames (~60 Hz), keeping only the newest one."""
        while self.streaming:
            ret, frame = self.cap.read()
            if ret:
                self.frame = frame
            time.sleep(1/60)

    def read(self):
        """Mimic cv2.VideoCapture.read(): return (ok, frame) and consume the
        cached frame so the same frame is not handed out twice."""
        ret, frame = self.frame is not None, self.frame
        self.frame = None
        return ret, frame

    def release(self):
        """Stop the reader loop and release the underlying capture."""
        self.streaming = False
        self.cap.release()


class CV2_Listener(object):
    """Video stream receiver; frames returned by getFrame are resized to
    (width, height)."""

    STREAMING_PORT = 11112

    def __init__(self, width=1920, height=1080, port=None, with_buffer=True, app_ip=None):
        self.width = width
        self.height = height
        self.streaming = False
        self.with_buffer = with_buffer
        self.app_ip = app_ip

    def isStreaming(self):
        return self.streaming

    def startLandingStream(self, cap_str):
        """Open a local capture device described by `cap_str`."""
        print(cap_str)
        if self.with_buffer:
            print("video cap with hdmi")
            self.cap = cv2.VideoCapture(cap_str, cv2.CAP_V4L)
        else:
            print("video cap without buffer")
            self.cap = VideoCaptureWithoutBuffer(cv2.VideoCapture(cap_str, cv2.CAP_DSHOW))
        self.streaming = True

    def start(self, protocol, ip, port):
        """Open a network stream built as protocol://ip:port."""
        cap_str = "%s://%s:%s" % (protocol, ip, port)
        print(cap_str)
        if self.with_buffer:
            print("video cap with buffer")
            self.cap = cv2.VideoCapture(cap_str)
        else:
            print("video cap without buffer")
            self.cap = VideoCaptureWithoutBuffer(cv2.VideoCapture(cap_str))
        self.streaming = True

    def getFrame(self):
        """Return the next frame resized to (width, height), or None when no
        frame is available."""
        ret, frame = self.cap.read()
        return None if self.cap is None or not ret else cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)

    def stop(self):
        """Release the capture and mark the listener as not streaming."""
        self.cap.release()
        self.streaming = False

    def getVideoCapture(self):
        return self.cap

    def getWidth(self):
        return self.width

    def getHeight(self):
        # BUGFIX: previously returned self.width.
        return self.height
main.py
from flask import Flask, make_response, jsonify, send_from_directory, redirect
import camera
from PIL import Image, ImageFont, ImageDraw
import io
import os
from threading import Thread
from datetime import datetime, timedelta

# Directory where the camera loop stores captured JPEGs.
DIR = "photos"
app = Flask(__name__)


def find_image(label, offset):
    """Redirect to the photo `offset` positions away from `label` in
    modification-time order (newest photo when label is None).

    Side effect: photos older than three days are deleted on every call.
    """
    global DIR
    benchmark_timestamp = datetime.now() - timedelta(days=3)
    # (filename, mtime) pairs, oldest first.
    files = sorted(
        [(f, datetime.fromtimestamp(os.path.getmtime(os.path.join(DIR, f)))) for f in os.listdir(DIR)],
        key=lambda x: x[1]
    )
    # Prune expired photos (list comprehension used purely for side effects).
    [os.remove(os.path.join(DIR, f[0])) for f in files if f[1] < benchmark_timestamp]
    files = [f[0].replace(".jpg", "") for f in files]
    index = len(files) - 1
    if label is not None:
        # NOTE(review): raises ValueError when `label` is unknown (e.g. the
        # photo was just pruned above) — confirm this is acceptable.
        index = files.index(label)
    if not (0 <= index + offset < len(files)):
        return "No more photos"
    return redirect("/#" + files[index + offset])


def annotate_image(image_data, date_taken, recording=False):
    """Return a JPEG Flask response with the timestamp (and a motion marker
    when `recording`) drawn in the top-left corner; 503 when date_taken is
    None (no camera feed)."""
    if date_taken is None:
        return ("No camera feed", 503)
    text = date_taken.strftime("%Y-%m-%d %H:%M:%S")
    if recording:
        text += " | MOTION DETECTED"
    img = Image.open(image_data)  # accepts a path or a file-like object
    draw = ImageDraw.Draw(img)
    #font = ImageFont.truetype("sans-serif.ttf", 20)
    draw.text((5, 5),text,(255,255,0))
    result = io.BytesIO()
    img.save(result, format="JPEG")
    r = make_response(result.getvalue())
    r.headers.set("Content-Type", "image/jpeg")
    return r


@app.route("/")
def home():
    """Serve the static single-page UI."""
    return app.send_static_file("index.html")


@app.route("/photos/<label>")
def photos(label):
    """Serve one stored photo by label, annotated with its mtime."""
    global DIR
    # Dots and path separators are replaced so a crafted label cannot
    # escape DIR.
    path = os.path.join(
        DIR, label.replace(".", "_").replace("/", "_").replace("\\", "_") + ".jpg"
    )
    age = datetime.fromtimestamp(os.path.getmtime(path))
    return annotate_image(path, age, True)


@app.route("/next/<label>")
def next_photo(label):
    """Redirect to the photo taken after `label`."""
    return find_image(label, 1)


@app.route("/prev/<label>")
def prev_photo(label):
    """Redirect to the photo taken before `label`."""
    return find_image(label, -1)


@app.route("/last")
def last_photo():
    """Redirect to the first photo of the most recent capture burst; bursts
    are separated by gaps of more than 5 seconds between mtimes."""
    global DIR
    # NOTE(review): benchmark_timestamp is computed but unused here (no
    # pruning in this handler, unlike find_image).
    benchmark_timestamp = datetime.now() - timedelta(days=3)
    files = sorted(
        [(f, datetime.fromtimestamp(os.path.getmtime(os.path.join(DIR, f)))) for f in os.listdir(DIR)],
        key=lambda x: x[1]
    )
    index = len(files) - 1
    while index > 0:
        if (files[index][1] - files[index - 1][1]).total_seconds() > 5:
            return redirect("/#" + files[index][0].replace(".jpg", ""))
        index = index - 1
    return "No captures found"


@app.route("/climate")
def get_climate():
    """Return the latest climate sensor reading as raw JSON."""
    with open("climate/latest.json", "r") as f:
        return f.read()


@app.route("/camera")
def feed():
    """Return the current live camera frame, annotated."""
    age, data, recording = camera.capture_image()
    if data is None:
        return "Error no data"
    return annotate_image(data, age, recording)


if __name__ == "__main__":
    # Run the capture loop alongside the web server.
    camera_thread = Thread(target=camera.camera_loop)
    camera_thread.start()
    app.run(host="0.0.0.0", port=8080)
view_tester.py
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Smoke-tester for a running Guild View server: verifies the runs endpoint
# and the embedded TensorBoard instance respond with valid JSON.

from __future__ import absolute_import
from __future__ import division

import json
import logging
import sys
import threading
import time

try:
    from urllib.request import urlopen
except ImportError:
    # pylint: disable=import-error
    from urllib2 import urlopen

from guild import util

log = logging.getLogger("guild")


def start_tester(host, port, exit=None):
    """Run the view smoke test on a background thread.

    `exit` is called with 0 on success and 1 on failure; it defaults to a
    no-op so callers may ignore the outcome.
    """
    if exit is None:
        exit = lambda _code: None
    tester = threading.Thread(target=_test_view, args=(host, port, exit))
    tester.start()


def _test_view(host, port, exit):
    # Thread body: exercise the endpoints, report the result via exit().
    view_url = util.local_server_url(host, port)
    try:
        _wait_for(view_url)
        _test_runs(view_url)
        _test_tensorboard(view_url)
    except Exception:
        log.exception("testing %s", view_url)
        exit(1)
    else:
        exit(0)


def _wait_for(url):
    # _urlread retries connection-refused for ~5s until the server is up.
    _urlread(url)


def _test_runs(view_url):
    """Fetch /runs and verify it parses as JSON."""
    runs_url = "{}/runs".format(view_url)
    sys.stdout.write("Testing %s\n" % runs_url)
    runs_str = _urlread(runs_url)
    runs = json.loads(runs_str.decode())
    sys.stdout.write(" - Got %i Guild run(s)\n" % len(runs))
    sys.stdout.flush()


def _test_tensorboard(view_url):
    """Initialize the embedded TensorBoard and verify its runs endpoint."""
    tb_init_url = "{}/tb/0/".format(view_url)
    sys.stdout.write("Initializing TensorBoard at %s\n" % tb_init_url)
    _urlread(tb_init_url)
    runs_url = "{}/tb/0/data/runs".format(view_url)
    sys.stdout.write("Testing %s\n" % runs_url)
    runs_str = _urlread(runs_url)
    runs = json.loads(runs_str.decode())
    sys.stdout.write(" - Got %i TensorBoard run(s)\n" % len(runs))
    sys.stdout.flush()


def _urlread(url):
    """GET `url` and return the body bytes, retrying connection-refused
    errors once per second for up to ~5 seconds."""
    timeout = time.time() + 5  # 5 seconds to connect
    while time.time() < timeout:
        try:
            f = urlopen(url)
        except Exception as e:
            # Only connection-refused errors are retried; anything else
            # (HTTP errors, malformed URLs) propagates immediately.
            if 'refused' not in str(e):
                raise
            time.sleep(1)
        else:
            return f.read()
    raise RuntimeError("connect timeout")
start.py
#!/usr/bin/python3
"""Container entrypoint for the Dovecot image: resolves peer containers,
renders configuration templates, launches Podop and execs dovecot."""

import jinja2
import os
import socket
import glob
import multiprocessing
import tenacity
import logging as log
import sys
from tenacity import retry
from podop import run_server

log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))


def start_podop():
    """Run the Podop socket server, proxying Dovecot dict lookups to the
    admin container."""
    os.setuid(8)  # drop root before serving
    run_server(0, "dovecot", "/tmp/podop.socket", [
        ("quota", "url", "http://admin/internal/dovecot/§"),
        ("auth", "url", "http://admin/internal/dovecot/§"),
        ("sieve", "url", "http://admin/internal/dovecot/§"),
    ])


def convert(src, dst):
    """Render the Jinja2 template at `src` with the process environment as
    context and write the result to `dst`."""
    logger = log.getLogger("convert()")
    logger.debug("Source: %s, Destination: %s", src, dst)
    # Use context managers so both files are closed and the rendered output
    # is flushed even on error or on non-refcounting interpreters; the
    # previous open(dst, "w").write(...) one-liner relied on GC to close.
    with open(src) as template_file:
        template = jinja2.Template(template_file.read())
    with open(dst, "w") as config_file:
        config_file.write(template.render(**os.environ))


@retry(
    stop=tenacity.stop_after_attempt(100),
    wait=tenacity.wait_random(min=2, max=5),
    before=tenacity.before_log(log.getLogger("tenacity.retry"), log.DEBUG),
    before_sleep=tenacity.before_sleep_log(log.getLogger("tenacity.retry"), log.INFO),
    after=tenacity.after_log(log.getLogger("tenacity.retry"), log.DEBUG)
)
def resolve(hostname):
    """Resolve `hostname` to an IP address, retrying (up to 100 attempts)
    while peer containers are still starting."""
    logger = log.getLogger("resolve()")
    logger.info(hostname)
    return socket.gethostbyname(hostname)


# Actual startup script
os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front"))
os.environ["REDIS_ADDRESS"] = resolve(os.environ.get("REDIS_ADDRESS", "redis"))
if os.environ["WEBMAIL"] != "none":
    os.environ["WEBMAIL_ADDRESS"] = resolve(os.environ.get("WEBMAIL_ADDRESS", "webmail"))

# Render every Dovecot template into /etc/dovecot.
for dovecot_file in glob.glob("/conf/*.conf"):
    convert(dovecot_file, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))

# Run Podop, then replace this process with dovecot in foreground mode.
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])